NUMclapack.cpp (505 KB, 14,308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352
/* NUMclapack.c */
/* -- LAPACK driver routines (version 3.0) -- Univ. of Tennessee, Univ. of
   California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab,
   and Rice University October 31, 1999 -- translated by f2c (version
   19990503).
   Adapted by David Weenink 20021201
   djmw 20030205 Latest modification
*/
/* #include "blaswrap.h" */
#include "NUMf2c.h"
#include "NUMclapack.h"
#include "NUMcblas.h"
#include "NUM2.h"
#include "melder.h"

#define FALSE 0
#define TRUE 1

/* Table of constant values */
static integer c__0 = 0;
static integer c__1 = 1;
static integer c_n1 = -1;
static integer c__2 = 2;
static integer c__3 = 3;
static integer c__4 = 4;
static integer c__6 = 6;
static integer c__10 = 10;
static integer c__11 = 11;
static double c_b15 = -.125;
static double c_b49 = 1.;
static double c_b72 = -1.;
static double c_b74 = 0.;
static double c_b108 = 1.;
static double c_b416 = 0.;
static double c_b438 = 1.;

#define MAX(m,n) ((m) > (n) ? (m) : (n))
#define MIN(m,n) ((m) < (n) ? (m) : (n))
#define a_ref(a_1,a_2) a[(a_2)*a_dim1 + a_1]
#define c___ref(a_1,a_2) c__[(a_2)*c_dim1 + a_1]

/* --------------------------------------------------- */

#define u_ref(a_1,a_2) u[(a_2)*u_dim1 + a_1]
#define vt_ref(a_1,a_2) vt[(a_2)*vt_dim1 + a_1]
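
/* NUMlapack_dbdsqr: from LAPACK 3.0 DBDSQR (a summary of its documentation).
   Computes the singular value decomposition B = Q * S * P' of an n-by-n
   (upper or lower, according to uplo) bidiagonal matrix B, using the
   implicit zero-shift QR algorithm of Demmel and Kahan. On entry d [1..n]
   holds the diagonal and e [1..n-1] the off-diagonal of B; on exit d holds
   the singular values in decreasing order. Optionally the routine
   accumulates vt := P' * vt (ncvt columns), u := u * Q (nru rows) and
   c := Q' * c (ncc columns), so that it can finish an SVD begun by a
   bidiagonal reduction such as NUMlapack_dgebd2 below. work must hold at
   least 4*n doubles. On exit, info is 0 on success, negative for an illegal
   argument, and positive for the number of off-diagonal elements that did
   not converge to zero. */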
int NUMlapack_dbdsqr (const char *uplo, integer *n, integer *ncvt, integer *nru, integer *ncc, double *d__, double *e, double *vt,
    integer *ldvt, double *u, integer *ldu, double *c__, integer *ldc, double *work, integer *info) {
    /* System generated locals */
    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
    double d__1, d__2, d__3, d__4;
    /* Local variables */
    static double abse;
    static integer idir;
    static double abss;
    static integer oldm;
    static double cosl;
    static integer isub, iter;
    static double unfl, sinl, cosr, smin, smax, sinr;
    static double f, g, h__;
    static integer i__, j, m;
    static double r__;
    static double oldcs;
    static integer oldll;
    static double shift, sigmn, oldsn;
    static integer maxit;
    static double sminl, sigmx;
    static integer lower;
    static double cs;
    static integer ll;
    static double sn, mu;
    static double sminoa, thresh;
    static integer rotate;
    static double sminlo;
    static integer nm1;
    static double tolmul;
    static integer nm12, nm13, lll;
    static double eps, sll, tol;
    /* Parameter adjustments */
    --d__;
    --e;
    vt_dim1 = *ldvt;
    vt_offset = 1 + vt_dim1 * 1;
    vt -= vt_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1 * 1;
    u -= u_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1 * 1;
    c__ -= c_offset;
    --work;
    /* Function Body */
    *info = 0;
    lower = lsame_ (uplo, "L");
    if (!lsame_ (uplo, "U") && !lower) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*ncvt < 0) {
        *info = -3;
    } else if (*nru < 0) {
        *info = -4;
    } else if (*ncc < 0) {
        *info = -5;
    } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < MAX (1, *n))) {
        *info = -9;
    } else if (*ldu < MAX (1, *nru)) {
        *info = -11;
    } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < MAX (1, *n))) {
        *info = -13;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DBDSQR", &i__1);
        return 0;
    }
    if (*n == 0) {
        return 0;
    }
    if (*n == 1) {
        goto L160;
    }
    /* ROTATE is true if any singular vectors desired, false otherwise */
    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
    /* If no singular vectors desired, use qd algorithm */
    if (!rotate) {
        NUMlapack_dlasq1 (n, &d__[1], &e[1], &work[1], info);
        return 0;
    }
    nm1 = *n - 1;
    nm12 = nm1 + nm1;
    nm13 = nm12 + nm1;
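    /* The rotations of one bulge chase are saved in four consecutive blocks
       of length n-1 inside work, so that NUMlapack_dlasr can apply them to
       the singular vectors afterwards:
           work [1 .. nm1]              cosines (cs)
           work [nm1+1 .. nm12]         sines (sn)
           work [nm12+1 .. nm13]        cosines (oldcs)
           work [nm13+1 .. nm13+nm1]    sines (oldsn) */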
    idir = 0;
    /* Get machine constants */
    eps = NUMblas_dlamch ("Epsilon");
    unfl = NUMblas_dlamch ("Safe minimum");
    /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying
       Givens rotations on the left */
    if (lower) {
        i__1 = *n - 1;
        for (i__ = 1; i__ <= i__1; ++i__) {
            NUMlapack_dlartg (&d__[i__], &e[i__], &cs, &sn, &r__);
            d__[i__] = r__;
            e[i__] = sn * d__[i__ + 1];
            d__[i__ + 1] = cs * d__[i__ + 1];
            work[i__] = cs;
            work[nm1 + i__] = sn;
            /* L10: */
        }
        /* Update singular vectors if desired */
        if (*nru > 0) {
            NUMlapack_dlasr ("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], ldu);
        }
        if (*ncc > 0) {
            NUMlapack_dlasr ("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset], ldc);
        }
    }
    /* Compute singular values to relative accuracy TOL (by setting TOL to be
       negative, the algorithm will compute singular values to absolute
       accuracy fabs(TOL)*norm(input matrix)), where
       TOLMUL = MAX (10, MIN (100, EPS^(-1/8))).
       Computing MAX, Computing MIN */
    d__3 = 100., d__4 = pow (eps, c_b15);
    d__1 = 10., d__2 = MIN (d__3, d__4);
    tolmul = MAX (d__1, d__2);
    tol = tolmul * eps;
    /* Compute approximate maximum, minimum singular values */
    smax = 0.;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Computing MAX */
        d__2 = smax, d__3 = (d__1 = d__[i__], fabs (d__1));
        smax = MAX (d__2, d__3);
        /* L20: */
    }
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Computing MAX */
        d__2 = smax, d__3 = (d__1 = e[i__], fabs (d__1));
        smax = MAX (d__2, d__3);
        /* L30: */
    }
    sminl = 0.;
    if (tol >= 0.) {
        /* Relative accuracy desired */
        sminoa = fabs (d__[1]);
        if (sminoa == 0.) {
            goto L50;
        }
        mu = sminoa;
        i__1 = *n;
        for (i__ = 2; i__ <= i__1; ++i__) {
            mu = (d__2 = d__[i__], fabs (d__2)) * (mu / (mu + (d__1 = e[i__ - 1], fabs (d__1))));
            sminoa = MIN (sminoa, mu);
            if (sminoa == 0.) {
                goto L50;
            }
            /* L40: */
        }
L50:
        sminoa /= sqrt ((double) (*n));
        /* Computing MAX */
        d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
        thresh = MAX (d__1, d__2);
    } else {
        /* Absolute accuracy desired
           Computing MAX */
        d__1 = fabs (tol) * smax, d__2 = *n * 6 * *n * unfl;
        thresh = MAX (d__1, d__2);
    }
    /* Prepare for main iteration loop for the singular values (MAXIT is the
       maximum number of passes through the inner loop permitted before
       nonconvergence is signalled). */
    maxit = *n * 6 * *n;
    iter = 0;
    oldll = -1;
    oldm = -1;
    /* M points to last element of unconverged part of matrix */
    m = *n;
    /* Begin main iteration loop */
L60:
    /* Check for convergence or exceeding iteration count */
    if (m <= 1) {
        goto L160;
    }
    if (iter > maxit) {
        goto L200;
    }
    /* Find diagonal block of matrix to work on */
    if (tol < 0. && (d__1 = d__[m], fabs (d__1)) <= thresh) {
        d__[m] = 0.;
    }
    smax = (d__1 = d__[m], fabs (d__1));
    smin = smax;
    i__1 = m - 1;
    for (lll = 1; lll <= i__1; ++lll) {
        ll = m - lll;
        abss = (d__1 = d__[ll], fabs (d__1));
        abse = (d__1 = e[ll], fabs (d__1));
        if (tol < 0. && abss <= thresh) {
            d__[ll] = 0.;
        }
        if (abse <= thresh) {
            goto L80;
        }
        smin = MIN (smin, abss);
        /* Computing MAX */
        d__1 = MAX (smax, abss);
        smax = MAX (d__1, abse);
        /* L70: */
    }
    ll = 0;
    goto L90;
L80:
    e[ll] = 0.;
    /* Matrix splits since E(LL) = 0 */
    if (ll == m - 1) {
        /* Convergence of bottom singular value, return to top of loop */
        --m;
        goto L60;
    }
L90:
    ++ll;
    /* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
    if (ll == m - 1) {
        /* 2 by 2 block, handle separately */
        NUMlapack_dlasv2 (&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, &sinl, &cosl);
        d__[m - 1] = sigmx;
        e[m - 1] = 0.;
        d__[m] = sigmn;
        /* Compute singular vectors, if desired */
        if (*ncvt > 0) {
            NUMblas_drot (ncvt, &vt_ref (m - 1, 1), ldvt, &vt_ref (m, 1), ldvt, &cosr, &sinr);
        }
        if (*nru > 0) {
            NUMblas_drot (nru, &u_ref (1, m - 1), &c__1, &u_ref (1, m), &c__1, &cosl, &sinl);
        }
        if (*ncc > 0) {
            NUMblas_drot (ncc, &c___ref (m - 1, 1), ldc, &c___ref (m, 1), ldc, &cosl, &sinl);
        }
        m += -2;
        goto L60;
    }
    /* If working on new submatrix, choose shift direction (from larger end
       diagonal element towards smaller) */
    if (ll > oldm || m < oldll) {
        if ((d__1 = d__[ll], fabs (d__1)) >= (d__2 = d__[m], fabs (d__2))) {
            /* Chase bulge from top (big end) to bottom (small end) */
            idir = 1;
        } else {
            /* Chase bulge from bottom (big end) to top (small end) */
            idir = 2;
        }
    }
    /* Apply convergence tests */
    if (idir == 1) {
        /* Run convergence test in forward direction. First apply standard
           test to bottom of matrix */
        if ((d__2 = e[m - 1], fabs (d__2)) <= fabs (tol) * (d__1 = d__[m], fabs (d__1)) ||
            (tol < 0. && (d__3 = e[m - 1], fabs (d__3)) <= thresh)) {
            e[m - 1] = 0.;
            goto L60;
        }
        if (tol >= 0.) {
            /* If relative accuracy desired, apply convergence criterion
               forward */
            mu = (d__1 = d__[ll], fabs (d__1));
            sminl = mu;
            i__1 = m - 1;
            for (lll = ll; lll <= i__1; ++lll) {
                if ((d__1 = e[lll], fabs (d__1)) <= tol * mu) {
                    e[lll] = 0.;
                    goto L60;
                }
                sminlo = sminl;
                mu = (d__2 = d__[lll + 1], fabs (d__2)) * (mu / (mu + (d__1 = e[lll], fabs (d__1))));
                sminl = MIN (sminl, mu);
                /* L100: */
            }
        }
    } else {
        /* Run convergence test in backward direction. First apply standard
           test to top of matrix */
        if ((d__2 = e[ll], fabs (d__2)) <= fabs (tol) * (d__1 = d__[ll], fabs (d__1)) ||
            (tol < 0. && (d__3 = e[ll], fabs (d__3)) <= thresh)) {
            e[ll] = 0.;
            goto L60;
        }
        if (tol >= 0.) {
            /* If relative accuracy desired, apply convergence criterion
               backward */
            mu = (d__1 = d__[m], fabs (d__1));
            sminl = mu;
            i__1 = ll;
            for (lll = m - 1; lll >= i__1; --lll) {
                if ((d__1 = e[lll], fabs (d__1)) <= tol * mu) {
                    e[lll] = 0.;
                    goto L60;
                }
                sminlo = sminl;
                mu = (d__2 = d__[lll], fabs (d__2)) * (mu / (mu + (d__1 = e[lll], fabs (d__1))));
                sminl = MIN (sminl, mu);
                /* L110: */
            }
        }
    }
    oldll = ll;
    oldm = m;
    /* Compute shift. First, test if shifting would ruin relative accuracy,
       and if so set the shift to zero.
       Computing MAX */
    d__1 = eps, d__2 = tol * .01;
    if (tol >= 0. && *n * tol * (sminl / smax) <= MAX (d__1, d__2)) {
        /* Use a zero shift to avoid loss of relative accuracy */
        shift = 0.;
    } else {
        /* Compute the shift from 2-by-2 block at end of matrix */
        if (idir == 1) {
            sll = (d__1 = d__[ll], fabs (d__1));
            NUMlapack_dlas2 (&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
        } else {
            sll = (d__1 = d__[m], fabs (d__1));
            NUMlapack_dlas2 (&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
        }
        /* Test if shift negligible, and if so set to zero */
        if (sll > 0.) {
            /* Computing 2nd power */
            d__1 = shift / sll;
            if (d__1 * d__1 < eps) {
                shift = 0.;
            }
        }
    }
    /* Increment iteration count */
    iter = iter + m - ll;
    /* If SHIFT = 0, do simplified QR iteration */
    if (shift == 0.) {
        if (idir == 1) {
            /* Chase bulge from top to bottom. Save cosines and sines for
               later singular vector updates */
            cs = 1.;
            oldcs = 1.;
            i__1 = m - 1;
            for (i__ = ll; i__ <= i__1; ++i__) {
                d__1 = d__[i__] * cs;
                NUMlapack_dlartg (&d__1, &e[i__], &cs, &sn, &r__);
                if (i__ > ll) {
                    e[i__ - 1] = oldsn * r__;
                }
                d__1 = oldcs * r__;
                d__2 = d__[i__ + 1] * sn;
                NUMlapack_dlartg (&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
                work[i__ - ll + 1] = cs;
                work[i__ - ll + 1 + nm1] = sn;
                work[i__ - ll + 1 + nm12] = oldcs;
                work[i__ - ll + 1 + nm13] = oldsn;
                /* L120: */
            }
            h__ = d__[m] * cs;
            d__[m] = h__ * oldcs;
            e[m - 1] = h__ * oldsn;
            /* Update singular vectors */
            if (*ncvt > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt_ref (ll, 1), ldvt);
            }
            if (*nru > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u_ref (1, ll),
                    ldu);
            }
            if (*ncc > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c___ref (ll, 1),
                    ldc);
            }
            /* Test convergence */
            if ((d__1 = e[m - 1], fabs (d__1)) <= thresh) {
                e[m - 1] = 0.;
            }
        } else {
            /* Chase bulge from bottom to top. Save cosines and sines for
               later singular vector updates */
            cs = 1.;
            oldcs = 1.;
            i__1 = ll + 1;
            for (i__ = m; i__ >= i__1; --i__) {
                d__1 = d__[i__] * cs;
                NUMlapack_dlartg (&d__1, &e[i__ - 1], &cs, &sn, &r__);
                if (i__ < m) {
                    e[i__] = oldsn * r__;
                }
                d__1 = oldcs * r__;
                d__2 = d__[i__ - 1] * sn;
                NUMlapack_dlartg (&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
                work[i__ - ll] = cs;
                work[i__ - ll + nm1] = -sn;
                work[i__ - ll + nm12] = oldcs;
                work[i__ - ll + nm13] = -oldsn;
                /* L130: */
            }
            h__ = d__[ll] * cs;
            d__[ll] = h__ * oldcs;
            e[ll] = h__ * oldsn;
            /* Update singular vectors */
            if (*ncvt > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[nm13 + 1], &vt_ref (ll, 1),
                    ldvt);
            }
            if (*nru > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u_ref (1, ll), ldu);
            }
            if (*ncc > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c___ref (ll, 1), ldc);
            }
            /* Test convergence */
            if ((d__1 = e[ll], fabs (d__1)) <= thresh) {
                e[ll] = 0.;
            }
        }
    } else {
        /* Use nonzero shift */
        if (idir == 1) {
            /* Chase bulge from top to bottom. Save cosines and sines for
               later singular vector updates */
            f = ((d__1 = d__[ll], fabs (d__1)) - shift) * (d_sign (&c_b49, &d__[ll]) + shift / d__[ll]);
            g = e[ll];
            i__1 = m - 1;
            for (i__ = ll; i__ <= i__1; ++i__) {
                NUMlapack_dlartg (&f, &g, &cosr, &sinr, &r__);
                if (i__ > ll) {
                    e[i__ - 1] = r__;
                }
                f = cosr * d__[i__] + sinr * e[i__];
                e[i__] = cosr * e[i__] - sinr * d__[i__];
                g = sinr * d__[i__ + 1];
                d__[i__ + 1] = cosr * d__[i__ + 1];
                NUMlapack_dlartg (&f, &g, &cosl, &sinl, &r__);
                d__[i__] = r__;
                f = cosl * e[i__] + sinl * d__[i__ + 1];
                d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
                if (i__ < m - 1) {
                    g = sinl * e[i__ + 1];
                    e[i__ + 1] = cosl * e[i__ + 1];
                }
                work[i__ - ll + 1] = cosr;
                work[i__ - ll + 1 + nm1] = sinr;
                work[i__ - ll + 1 + nm12] = cosl;
                work[i__ - ll + 1 + nm13] = sinl;
                /* L140: */
            }
            e[m - 1] = f;
            /* Update singular vectors */
            if (*ncvt > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt_ref (ll, 1), ldvt);
            }
            if (*nru > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u_ref (1, ll),
                    ldu);
            }
            if (*ncc > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c___ref (ll, 1),
                    ldc);
            }
            /* Test convergence */
            if ((d__1 = e[m - 1], fabs (d__1)) <= thresh) {
                e[m - 1] = 0.;
            }
        } else {
            /* Chase bulge from bottom to top. Save cosines and sines for
               later singular vector updates */
            f = ((d__1 = d__[m], fabs (d__1)) - shift) * (d_sign (&c_b49, &d__[m]) + shift / d__[m]);
            g = e[m - 1];
            i__1 = ll + 1;
            for (i__ = m; i__ >= i__1; --i__) {
                NUMlapack_dlartg (&f, &g, &cosr, &sinr, &r__);
                if (i__ < m) {
                    e[i__] = r__;
                }
                f = cosr * d__[i__] + sinr * e[i__ - 1];
                e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
                g = sinr * d__[i__ - 1];
                d__[i__ - 1] = cosr * d__[i__ - 1];
                NUMlapack_dlartg (&f, &g, &cosl, &sinl, &r__);
                d__[i__] = r__;
                f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
                d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
                if (i__ > ll + 1) {
                    g = sinl * e[i__ - 2];
                    e[i__ - 2] = cosl * e[i__ - 2];
                }
                work[i__ - ll] = cosr;
                work[i__ - ll + nm1] = -sinr;
                work[i__ - ll + nm12] = cosl;
                work[i__ - ll + nm13] = -sinl;
                /* L150: */
            }
            e[ll] = f;
            /* Test convergence */
            if ((d__1 = e[ll], fabs (d__1)) <= thresh) {
                e[ll] = 0.;
            }
            /* Update singular vectors if desired */
            if (*ncvt > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[nm13 + 1], &vt_ref (ll, 1),
                    ldvt);
            }
            if (*nru > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u_ref (1, ll), ldu);
            }
            if (*ncc > 0) {
                i__1 = m - ll + 1;
                NUMlapack_dlasr ("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c___ref (ll, 1), ldc);
            }
        }
    }
    /* QR iteration finished, go back and check convergence */
    goto L60;
    /* All singular values converged, so make them positive */
L160:
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
        if (d__[i__] < 0.) {
            d__[i__] = -d__[i__];
            /* Change sign of singular vectors, if desired */
            if (*ncvt > 0) {
                NUMblas_dscal (ncvt, &c_b72, &vt_ref (i__, 1), ldvt);
            }
        }
        /* L170: */
    }
    /* Sort the singular values into decreasing order (insertion sort on
       singular values, but only one transposition per singular vector) */
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Scan for smallest D(I) */
        isub = 1;
        smin = d__[1];
        i__2 = *n + 1 - i__;
        for (j = 2; j <= i__2; ++j) {
            if (d__[j] <= smin) {
                isub = j;
                smin = d__[j];
            }
            /* L180: */
        }
        if (isub != *n + 1 - i__) {
            /* Swap singular values and vectors */
            d__[isub] = d__[*n + 1 - i__];
            d__[*n + 1 - i__] = smin;
            if (*ncvt > 0) {
                NUMblas_dswap (ncvt, &vt_ref (isub, 1), ldvt, &vt_ref (*n + 1 - i__, 1), ldvt);
            }
            if (*nru > 0) {
                NUMblas_dswap (nru, &u_ref (1, isub), &c__1, &u_ref (1, *n + 1 - i__), &c__1);
            }
            if (*ncc > 0) {
                NUMblas_dswap (ncc, &c___ref (isub, 1), ldc, &c___ref (*n + 1 - i__, 1), ldc);
            }
        }
        /* L190: */
    }
    goto L220;
    /* Maximum number of iterations exceeded, failure to converge */
L200:
    *info = 0;
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
        if (e[i__] != 0.) {
            ++ (*info);
        }
        /* L210: */
    }
L220:
    return 0;
} /* NUMlapack_dbdsqr */
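
/* A minimal usage sketch (hypothetical; the variables n, d, e and work are
   assumed to be set up by the caller): to compute only the singular values
   of an n-by-n upper bidiagonal matrix whose diagonal is d [0..n-1] and
   whose superdiagonal is e [0..n-2]:

       integer zero = 0, one = 1, info;
       double dummy [1];
       // work must hold at least 4*n doubles
       NUMlapack_dbdsqr ("U", &n, &zero, &zero, &zero, d, e, dummy, &one,
           dummy, &one, dummy, &one, work, &info);

   With ncvt = nru = ncc = 0 the routine delegates to NUMlapack_dlasq1;
   passing ncvt, nru or ncc > 0 together with real vt, u or c arrays makes
   it update the singular vectors as in a full SVD driver. */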
#undef vt_ref
#undef u_ref
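
/* NUMlapack_dgebd2: from LAPACK 3.0 DGEBD2 (a summary of its documentation).
   Unblocked reduction of a real m-by-n matrix A to bidiagonal form B by
   orthogonal transformations Q' * A * P = B. The diagonal of B is returned
   in d [1..min(m,n)] and the off-diagonal in e [1..min(m,n)-1]; the
   Householder vectors that define Q and P remain stored in A, with their
   scalar factors in tauq and taup. work must hold at least MAX (m, n)
   doubles. */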
int NUMlapack_dgebd2 (integer *m, integer *n, double *a, integer *lda, double *d__, double *e, double *tauq,
	double *taup, double *work, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
	/* Local variables */
	static integer i__;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--d__;
	--e;
	--tauq;
	--taup;
	--work;

	/* Function Body */
	*info = 0;
	if (*m < 0) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *m)) {
		*info = -4;
	}
	if (*info < 0) {
		i__1 = - (*info);
		xerbla_ ("DGEBD2", &i__1);
		return 0;
	}
	if (*m >= *n) {
		/* Reduce to upper bidiagonal form */
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
			   Computing MIN */
			i__2 = i__ + 1;
			i__3 = *m - i__ + 1;
			NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (MIN (i__2, *m), i__), &c__1, &tauq[i__]);
			d__[i__] = a_ref (i__, i__);
			a_ref (i__, i__) = 1.;
			/* Apply H(i) to A(i:m,i+1:n) from the left */
			i__2 = *m - i__ + 1;
			i__3 = *n - i__;
			NUMlapack_dlarf ("Left", &i__2, &i__3, &a_ref (i__, i__), &c__1, &tauq[i__], &a_ref (i__, i__ + 1),
				lda, &work[1]);
			a_ref (i__, i__) = d__[i__];
			if (i__ < *n) {
				/* Generate elementary reflector G(i) to annihilate A(i,i+2:n)
				   Computing MIN */
				i__2 = i__ + 2;
				i__3 = *n - i__;
				NUMlapack_dlarfg (&i__3, &a_ref (i__, i__ + 1), &a_ref (i__, MIN (i__2, *n)), lda, &taup[i__]);
				e[i__] = a_ref (i__, i__ + 1);
				a_ref (i__, i__ + 1) = 1.;
				/* Apply G(i) to A(i+1:m,i+1:n) from the right */
				i__2 = *m - i__;
				i__3 = *n - i__;
				NUMlapack_dlarf ("Right", &i__2, &i__3, &a_ref (i__, i__ + 1), lda, &taup[i__], &a_ref (i__ + 1,
					i__ + 1), lda, &work[1]);
				a_ref (i__, i__ + 1) = e[i__];
			} else {
				taup[i__] = 0.;
			}
			/* L10: */
		}
	} else {
		/* Reduce to lower bidiagonal form */
		i__1 = *m;
		for (i__ = 1; i__ <= i__1; ++i__) {
			/* Generate elementary reflector G(i) to annihilate A(i,i+1:n)
			   Computing MIN */
			i__2 = i__ + 1;
			i__3 = *n - i__ + 1;
			NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (i__, MIN (i__2, *n)), lda, &taup[i__]);
			d__[i__] = a_ref (i__, i__);
			a_ref (i__, i__) = 1.;
			/* Apply G(i) to A(i+1:m,i:n) from the right
			   Computing MIN */
			i__2 = i__ + 1;
			i__3 = *m - i__;
			i__4 = *n - i__ + 1;
			NUMlapack_dlarf ("Right", &i__3, &i__4, &a_ref (i__, i__), lda, &taup[i__], &a_ref (MIN (i__2, *m),
				i__), lda, &work[1]);
			a_ref (i__, i__) = d__[i__];
			if (i__ < *m) {
				/* Generate elementary reflector H(i) to annihilate A(i+2:m,i)
				   Computing MIN */
				i__2 = i__ + 2;
				i__3 = *m - i__;
				NUMlapack_dlarfg (&i__3, &a_ref (i__ + 1, i__), &a_ref (MIN (i__2, *m), i__), &c__1,
					&tauq[i__]);
				e[i__] = a_ref (i__ + 1, i__);
				a_ref (i__ + 1, i__) = 1.;
				/* Apply H(i) to A(i+1:m,i+1:n) from the left */
				i__2 = *m - i__;
				i__3 = *n - i__;
				NUMlapack_dlarf ("Left", &i__2, &i__3, &a_ref (i__ + 1, i__), &c__1, &tauq[i__],
					&a_ref (i__ + 1, i__ + 1), lda, &work[1]);
				a_ref (i__ + 1, i__) = e[i__];
			} else {
				tauq[i__] = 0.;
			}
			/* L20: */
		}
	}
	return 0;
} /* NUMlapack_dgebd2 */
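/* Usage sketch (assumption: the routine is called like LAPACK's DGEBD2, on a
   column-major array, with work of length MAX (m, n)). Reduce a 3x2 matrix to
   upper bidiagonal form; the values are illustrative and the block is
   compiled out. */
#if 0
static void example_dgebd2 (void) {
	integer m = 3, n = 2, lda = 3, info = 0;
	double a[6] = { 1.0, 2.0, 3.0,    /* column 1 */
	                4.0, 5.0, 6.0 };  /* column 2 */
	double d[2], e[1], tauq[2], taup[2], work[3];
	NUMlapack_dgebd2 (&m, &n, a, &lda, d, e, tauq, taup, work, &info);
	/* info == 0 on success: d holds the diagonal, e the superdiagonal, and the
	   reflectors H(i) and G(i) overwrite a below and above the bidiagonal */
}
#endif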
int NUMlapack_dgebak (const char *job, const char *side, integer *n, integer *ilo, integer *ihi, double *scale, integer *m,
	double *v, integer *ldv, integer *info) {
	/* System generated locals */
	integer v_dim1, v_offset, i__1;
	/* Local variables */
	static integer i__, k;
	static double s;
	static int leftv;
	static integer ii;
	static int rightv;

#define v_ref(a_1,a_2) v[(a_2)*v_dim1 + a_1]

	--scale;
	v_dim1 = *ldv;
	v_offset = 1 + v_dim1 * 1;
	v -= v_offset;

	/* Function Body */
	rightv = lsame_ (side, "R");
	leftv = lsame_ (side, "L");
	*info = 0;
	if (!lsame_ (job, "N") && !lsame_ (job, "P") && !lsame_ (job, "S") && !lsame_ (job, "B")) {
		*info = -1;
	} else if (!rightv && !leftv) {
		*info = -2;
	} else if (*n < 0) {
		*info = -3;
	} else if (*ilo < 1 || *ilo > MAX (1, *n)) {
		*info = -4;
	} else if (*ihi < MIN (*ilo, *n) || *ihi > *n) {
		*info = -5;
	} else if (*m < 0) {
		*info = -7;
	} else if (*ldv < MAX (1, *n)) {
		*info = -9;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("NUMlapack_dgebak ", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		return 0;
	}
	if (*m == 0) {
		return 0;
	}
	if (lsame_ (job, "N")) {
		return 0;
	}
	if (*ilo == *ihi) {
		goto L30;
	}
	/* Backward balance */
	if (lsame_ (job, "S") || lsame_ (job, "B")) {
		if (rightv) {
			i__1 = *ihi;
			for (i__ = *ilo; i__ <= i__1; ++i__) {
				s = scale[i__];
				NUMblas_dscal (m, &s, &v_ref (i__, 1), ldv);
				/* L10: */
			}
		}
		if (leftv) {
			i__1 = *ihi;
			for (i__ = *ilo; i__ <= i__1; ++i__) {
				s = 1. / scale[i__];
				NUMblas_dscal (m, &s, &v_ref (i__, 1), ldv);
				/* L20: */
			}
		}
	}
	/* Backward permutation
	   For I = ILO-1 step -1 until 1, IHI+1 step 1 until N do -- */
L30:
	if (lsame_ (job, "P") || lsame_ (job, "B")) {
		if (rightv) {
			i__1 = *n;
			for (ii = 1; ii <= i__1; ++ii) {
				i__ = ii;
				if (i__ >= *ilo && i__ <= *ihi) {
					goto L40;
				}
				if (i__ < *ilo) {
					i__ = *ilo - ii;
				}
				k = (integer) scale[i__];
				if (k == i__) {
					goto L40;
				}
				NUMblas_dswap (m, &v_ref (i__, 1), ldv, &v_ref (k, 1), ldv);
L40:
				;
			}
		}
		if (leftv) {
			i__1 = *n;
			for (ii = 1; ii <= i__1; ++ii) {
				i__ = ii;
				if (i__ >= *ilo && i__ <= *ihi) {
					goto L50;
				}
				if (i__ < *ilo) {
					i__ = *ilo - ii;
				}
				k = (integer) scale[i__];
				if (k == i__) {
					goto L50;
				}
				NUMblas_dswap (m, &v_ref (i__, 1), ldv, &v_ref (k, 1), ldv);
L50:
				;
			}
		}
	}
	return 0;
} /* NUMlapack_dgebak */

#undef v_ref
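/* Note on the SCALE argument: outside [ilo, ihi], scale[j] stores the index of
   the row/column that was swapped with j during the permutation phase (read
   back above as k = (integer) scale[i__]); inside [ilo, ihi] it stores the
   diagonal scaling factor. Back-transforming eigenvectors of the balanced
   matrix therefore needs only scale, ilo and ihi, e.g. (illustrative wrapper,
   compiled out): */
#if 0
static void example_dgebak (integer n, integer ilo, integer ihi, double *scale,
	integer m, double *v, integer ldv) {
	integer info = 0;
	/* "B": undo both permutation and scaling; "R": v holds right eigenvectors */
	NUMlapack_dgebak ("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
}
#endif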
int NUMlapack_dgebal (const char *job, integer *n, double *a, integer *lda, integer *ilo, integer *ihi, double *scale,
	integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2;
	double d__1, d__2;
	/* Local variables */
	static integer iexc;
	static double c__, f, g;
	static integer i__, j, k, l, m;
	static double r__, s;
	static double sfmin1, sfmin2, sfmax1, sfmax2, ca, ra;
	static int noconv;
	static integer ica, ira;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--scale;

	/* Function Body */
	*info = 0;
	if (!lsame_ (job, "N") && !lsame_ (job, "P") && !lsame_ (job, "S") && !lsame_ (job, "B")) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *n)) {
		*info = -4;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("NUMlapack_dgebal ", &i__1);
		return 0;
	}
	k = 1;
	l = *n;
	if (*n == 0) {
		goto L210;
	}
	if (lsame_ (job, "N")) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			scale[i__] = 1.;
			/* L10: */
		}
		goto L210;
	}
	if (lsame_ (job, "S")) {
		goto L120;
	}
	/* Permutation to isolate eigenvalues if possible */
	goto L50;
	/* Row and column exchange. */
L20:
	scale[m] = (double) j;
	if (j == m) {
		goto L30;
	}
	NUMblas_dswap (&l, &a_ref (1, j), &c__1, &a_ref (1, m), &c__1);
	i__1 = *n - k + 1;
	NUMblas_dswap (&i__1, &a_ref (j, k), lda, &a_ref (m, k), lda);
L30:
	switch (iexc) {
		case 1:
			goto L40;
		case 2:
			goto L80;
	}
	/* Search for rows isolating an eigenvalue and push them down. */
L40:
	if (l == 1) {
		goto L210;
	}
	--l;
L50:
	for (j = l; j >= 1; --j) {
		i__1 = l;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (i__ == j) {
				goto L60;
			}
			if (a_ref (j, i__) != 0.) {
				goto L70;
			}
L60:
			;
		}
		m = l;
		iexc = 1;
		goto L20;
L70:
		;
	}
	goto L90;
	/* Search for columns isolating an eigenvalue and push them left. */
L80:
	++k;
L90:
	i__1 = l;
	for (j = k; j <= i__1; ++j) {
		i__2 = l;
		for (i__ = k; i__ <= i__2; ++i__) {
			if (i__ == j) {
				goto L100;
			}
			if (a_ref (i__, j) != 0.) {
				goto L110;
			}
L100:
			;
		}
		m = k;
		iexc = 2;
		goto L20;
L110:
		;
	}
L120:
	i__1 = l;
	for (i__ = k; i__ <= i__1; ++i__) {
		scale[i__] = 1.;
		/* L130: */
	}
	if (lsame_ (job, "P")) {
		goto L210;
	}
	/* Balance the submatrix in rows K to L.
	   Iterative loop for norm reduction */
	sfmin1 = NUMblas_dlamch ("S") / NUMblas_dlamch ("P");
	sfmax1 = 1. / sfmin1;
	sfmin2 = sfmin1 * 8.;
	sfmax2 = 1. / sfmin2;
L140:
	noconv = FALSE;
	i__1 = l;
	for (i__ = k; i__ <= i__1; ++i__) {
		c__ = 0.;
		r__ = 0.;
		i__2 = l;
		for (j = k; j <= i__2; ++j) {
			if (j == i__) {
				goto L150;
			}
			c__ += (d__1 = a_ref (j, i__), fabs (d__1));
			r__ += (d__1 = a_ref (i__, j), fabs (d__1));
L150:
			;
		}
		ica = NUMblas_idamax (&l, &a_ref (1, i__), &c__1);
		ca = (d__1 = a_ref (ica, i__), fabs (d__1));
		i__2 = *n - k + 1;
		ira = NUMblas_idamax (&i__2, &a_ref (i__, k), lda);
		ra = (d__1 = a_ref (i__, ira + k - 1), fabs (d__1));
		/* Guard against zero C or R due to underflow. */
		if (c__ == 0. || r__ == 0.) {
			goto L200;
		}
		g = r__ / 8.;
		f = 1.;
		s = c__ + r__;
L160:
		/* Computing MAX */
		d__1 = MAX (f, c__);
		/* Computing MIN */
		d__2 = MIN (r__, g);
		if (c__ >= g || MAX (d__1, ca) >= sfmax2 || MIN (d__2, ra) <= sfmin2) {
			goto L170;
		}
		f *= 8.;
		c__ *= 8.;
		ca *= 8.;
		r__ /= 8.;
		g /= 8.;
		ra /= 8.;
		goto L160;
L170:
		g = c__ / 8.;
L180:
		/* Computing MIN */
		d__1 = MIN (f, c__), d__1 = MIN (d__1, g);
		if (g < r__ || MAX (r__, ra) >= sfmax2 || MIN (d__1, ca) <= sfmin2) {
			goto L190;
		}
		f /= 8.;
		c__ /= 8.;
		g /= 8.;
		ca /= 8.;
		r__ *= 8.;
		ra *= 8.;
		goto L180;
		/* Now balance. */
L190:
		if (c__ + r__ >= s * .95) {
			goto L200;
		}
		if (f < 1. && scale[i__] < 1.) {
			if (f * scale[i__] <= sfmin1) {
				goto L200;
			}
		}
		if (f > 1. && scale[i__] > 1.) {
			if (scale[i__] >= sfmax1 / f) {
				goto L200;
			}
		}
		g = 1. / f;
		scale[i__] *= f;
		noconv = TRUE;
		i__2 = *n - k + 1;
		NUMblas_dscal (&i__2, &g, &a_ref (i__, k), lda);
		NUMblas_dscal (&l, &f, &a_ref (1, i__), &c__1);
L200:
		;
	}
	if (noconv) {
		goto L140;
	}
L210:
	*ilo = k;
	*ihi = l;
	return 0;
} /* NUMlapack_dgebal */
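/* Usage sketch: balance a matrix before eigenvalue work, to be undone later
   with NUMlapack_dgebak above (NUMlapack_dgeev below shows the real call
   sequence). The matrix values are illustrative; the block is compiled out. */
#if 0
static void example_dgebal (void) {
	integer n = 3, lda = 3, ilo, ihi, info = 0;
	double a[9] = { 1.0, 0.0, 0.0,      /* column-major 3x3 */
	                1e6, 2.0, 0.0,
	                1e6, 1e-6, 3.0 };
	double scale[3];
	NUMlapack_dgebal ("B", &n, a, &lda, &ilo, &ihi, scale, &info);
	/* rows/columns outside [ilo, ihi] hold isolated eigenvalues; scale records
	   the permutations and scaling factors for NUMlapack_dgebak */
}
#endif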
int NUMlapack_dgebrd (integer *m, integer *n, double *a, integer *lda, double *d__, double *e, double *tauq,
	double *taup, double *work, integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__3 = 3;
	static integer c__2 = 2;
	static double c_b21 = -1.;
	static double c_b22 = 1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
	/* Local variables */
	static integer i__, j;
	static integer nbmin, iinfo, minmn;
	static integer nb;
	static integer nx;
	static double ws;
	static integer ldwrkx, ldwrky, lwkopt;
	static integer lquery;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--d__;
	--e;
	--tauq;
	--taup;
	--work;

	/* Function Body */
	*info = 0;
	/* Computing MAX */
	i__1 = 1, i__2 = NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
	nb = MAX (i__1, i__2);
	lwkopt = (*m + *n) * nb;
	work[1] = (double) lwkopt;
	lquery = *lwork == -1;
	if (*m < 0) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *m)) {
		*info = -4;
	} else { /* if(complicated condition) */
		/* Computing MAX */
		i__1 = MAX (1, *m);
		if (*lwork < MAX (i__1, *n) && !lquery) {
			*info = -10;
		}
	}
	if (*info < 0) {
		i__1 = - (*info);
		xerbla_ ("DGEBRD", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	minmn = MIN (*m, *n);
	if (minmn == 0) {
		work[1] = 1.;
		return 0;
	}
	ws = (double) MAX (*m, *n);
	ldwrkx = *m;
	ldwrky = *n;
	if (nb > 1 && nb < minmn) {
		/* Set the crossover point NX.
		   Computing MAX */
		i__1 = nb, i__2 = NUMlapack_ilaenv (&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
		nx = MAX (i__1, i__2);
		/* Determine when to switch from blocked to unblocked code. */
		if (nx < minmn) {
			ws = (double) ( (*m + *n) * nb);
			if ( (double) (*lwork) < ws) {
				/* Not enough work space for the optimal NB, consider using a
				   smaller block size. */
				nbmin = NUMlapack_ilaenv (&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
				if (*lwork >= (*m + *n) * nbmin) {
					nb = *lwork / (*m + *n);
				} else {
					nb = 1;
					nx = minmn;
				}
			}
		}
	} else {
		nx = minmn;
	}
	i__1 = minmn - nx;
	i__2 = nb;
	for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
		/* Reduce rows and columns i:i+nb-1 to bidiagonal form and return the
		   matrices X and Y which are needed to update the unreduced part of
		   the matrix */
		i__3 = *m - i__ + 1;
		i__4 = *n - i__ + 1;
		NUMlapack_dlabrd (&i__3, &i__4, &nb, &a_ref (i__, i__), lda, &d__[i__], &e[i__], &tauq[i__], &taup[i__],
			&work[1], &ldwrkx, &work[ldwrkx * nb + 1], &ldwrky);
		/* Update the trailing submatrix A(i+nb:m,i+nb:n), using an update of
		   the form A := A - V*Y' - X*U' */
		i__3 = *m - i__ - nb + 1;
		i__4 = *n - i__ - nb + 1;
		NUMblas_dgemm ("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b21, &a_ref (i__ + nb, i__), lda,
			&work[ldwrkx * nb + nb + 1], &ldwrky, &c_b22, &a_ref (i__ + nb, i__ + nb), lda);
		i__3 = *m - i__ - nb + 1;
		i__4 = *n - i__ - nb + 1;
		NUMblas_dgemm ("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b21, &work[nb + 1], &ldwrkx,
			&a_ref (i__, i__ + nb), lda, &c_b22, &a_ref (i__ + nb, i__ + nb), lda);
		/* Copy diagonal and off-diagonal elements of B back into A */
		if (*m >= *n) {
			i__3 = i__ + nb - 1;
			for (j = i__; j <= i__3; ++j) {
				a_ref (j, j) = d__[j];
				a_ref (j, j + 1) = e[j];
				/* L10: */
			}
		} else {
			i__3 = i__ + nb - 1;
			for (j = i__; j <= i__3; ++j) {
				a_ref (j, j) = d__[j];
				a_ref (j + 1, j) = e[j];
				/* L20: */
			}
		}
		/* L30: */
	}
	/* Use unblocked code to reduce the remainder of the matrix */
	i__2 = *m - i__ + 1;
	i__1 = *n - i__ + 1;
	NUMlapack_dgebd2 (&i__2, &i__1, &a_ref (i__, i__), lda, &d__[i__], &e[i__], &tauq[i__], &taup[i__],
		&work[1], &iinfo);
	work[1] = ws;
	return 0;
} /* NUMlapack_dgebrd */
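/* Workspace-query idiom (sketch): like the other blocked routines in this
   file, NUMlapack_dgebrd accepts lwork == -1 and only reports the preferred
   workspace size in work[0], without touching the matrix. Illustrative
   wrapper, compiled out. */
#if 0
static integer example_dgebrd_query (integer m, integer n, double *a, integer lda,
	double *d, double *e, double *tauq, double *taup) {
	integer lwork = -1, info = 0;
	double wquery;
	NUMlapack_dgebrd (&m, &n, a, &lda, d, e, tauq, taup, &wquery, &lwork, &info);
	return (integer) wquery;   /* allocate this many doubles, then call again */
}
#endif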
int NUMlapack_dgeev (const char *jobvl, const char *jobvr, integer *n, double *a, integer *lda, double *wr, double *wi,
	double *vl, integer *ldvl, double *vr, integer *ldvr, double *work, integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__8 = 8;
	static integer c_n1 = -1;
	/* System generated locals */
	integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, i__2, i__3, i__4;
	double d__1, d__2;
	/* Local variables */
	static integer ibal;
	static char side[1];
	static integer maxb;
	static double anrm;
	static integer ierr, itau;
	static integer iwrk, nout;
	static integer i__, k;
	static double r__;
	static double cs;
	static int scalea;
	static double cscale;
	static double sn;
	static int select[1];
	static double bignum;
	static integer minwrk, maxwrk;
	static int wantvl;
	static double smlnum;
	static integer hswork;
	static int lquery, wantvr;
	static integer ihi;
	static double scl;
	static integer ilo;
	static double dum[1], eps;

#define vl_ref(a_1,a_2) vl[(a_2)*vl_dim1 + a_1]
#define vr_ref(a_1,a_2) vr[(a_2)*vr_dim1 + a_1]

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--wr;
	--wi;
	vl_dim1 = *ldvl;
	vl_offset = 1 + vl_dim1 * 1;
	vl -= vl_offset;
	vr_dim1 = *ldvr;
	vr_offset = 1 + vr_dim1 * 1;
	vr -= vr_offset;
	--work;

	/* Function Body */
	*info = 0;
	lquery = *lwork == -1;
	wantvl = lsame_ (jobvl, "V");
	wantvr = lsame_ (jobvr, "V");
	if (!wantvl && !lsame_ (jobvl, "N")) {
		*info = -1;
	} else if (!wantvr && !lsame_ (jobvr, "N")) {
		*info = -2;
	} else if (*n < 0) {
		*info = -3;
	} else if (*lda < MAX (1, *n)) {
		*info = -5;
	} else if (*ldvl < 1 || (wantvl && *ldvl < *n)) {
		*info = -9;
	} else if (*ldvr < 1 || (wantvr && *ldvr < *n)) {
		*info = -11;
	}
	/* Compute workspace (Note: Comments in the code beginning "Workspace:"
	   describe the minimal amount of workspace needed at that point in the
	   code, as well as the preferred amount for good performance. NB refers
	   to the optimal block size for the immediately following subroutine, as
	   returned by ILAENV. HSWORK refers to the workspace preferred by
	   NUMlapack_dhseqr , as calculated below. HSWORK is computed assuming
	   ILO=1 and IHI=N, the worst case.) */
	minwrk = 1;
	if (*info == 0 && (*lwork >= 1 || lquery)) {
		maxwrk =
			(*n << 1) + *n * NUMlapack_ilaenv (&c__1, "NUMlapack_dgehrd ", " ", n, &c__1, n, &c__0, 6, 1);
		if (!wantvl && !wantvr) {
			/* Computing MAX */
			i__1 = 1, i__2 = *n * 3;
			minwrk = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = NUMlapack_ilaenv (&c__8, "NUMlapack_dhseqr ", "EN", n, &c__1, n, &c_n1, 6, 2);
			maxb = MAX (i__1, 2);
			/* Computing MIN Computing MAX */
			i__3 = 2, i__4 = NUMlapack_ilaenv (&c__4, "NUMlapack_dhseqr ", "EN", n, &c__1, n, &c_n1, 6, 2);
			i__1 = MIN (maxb, *n), i__2 = MAX (i__3, i__4);
			k = MIN (i__1, i__2);
			/* Computing MAX */
			i__1 = k * (k + 2), i__2 = *n << 1;
			hswork = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = maxwrk, i__2 = *n + 1, i__1 = MAX (i__1, i__2), i__2 = *n + hswork;
			maxwrk = MAX (i__1, i__2);
		} else {
			/* Computing MAX */
			i__1 = 1, i__2 = *n << 2;
			minwrk = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = maxwrk, i__2 =
				(*n << 1) + (*n - 1) * NUMlapack_ilaenv (&c__1, "DOR" "GHR", " ", n, &c__1, n, &c_n1, 6, 1);
			maxwrk = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = NUMlapack_ilaenv (&c__8, "NUMlapack_dhseqr ", "SV", n, &c__1, n, &c_n1, 6, 2);
			maxb = MAX (i__1, 2);
			/* Computing MIN Computing MAX */
			i__3 = 2, i__4 = NUMlapack_ilaenv (&c__4, "NUMlapack_dhseqr ", "SV", n, &c__1, n, &c_n1, 6, 2);
			i__1 = MIN (maxb, *n), i__2 = MAX (i__3, i__4);
			k = MIN (i__1, i__2);
			/* Computing MAX */
			i__1 = k * (k + 2), i__2 = *n << 1;
			hswork = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = maxwrk, i__2 = *n + 1, i__1 = MAX (i__1, i__2), i__2 = *n + hswork;
			maxwrk = MAX (i__1, i__2);
			/* Computing MAX */
			i__1 = maxwrk, i__2 = *n << 2;
			maxwrk = MAX (i__1, i__2);
		}
		work[1] = (double) maxwrk;
	}
	if (*lwork < minwrk && !lquery) {
		*info = -13;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("NUMlapack_dgeev ", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		return 0;
	}
	/* Get machine constants */
	eps = NUMblas_dlamch ("P");
	smlnum = NUMblas_dlamch ("S");
	bignum = 1. / smlnum;
	NUMlapack_dlabad (&smlnum, &bignum);
	smlnum = sqrt (smlnum) / eps;
	bignum = 1. / smlnum;
	/* Scale A if max element outside range [SMLNUM,BIGNUM] */
	anrm = NUMlapack_dlange ("M", n, n, &a[a_offset], lda, dum);
	scalea = FALSE;
	if (anrm > 0. && anrm < smlnum) {
		scalea = TRUE;
		cscale = smlnum;
	} else if (anrm > bignum) {
		scalea = TRUE;
		cscale = bignum;
	}
	if (scalea) {
		NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &ierr);
	}
	/* Balance the matrix (Workspace: need N) */
	ibal = 1;
	NUMlapack_dgebal ("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
	/* Reduce to upper Hessenberg form (Workspace: need 3*N, prefer 2*N+N*NB) */
	itau = ibal + *n;
	iwrk = itau + *n;
	i__1 = *lwork - iwrk + 1;
	NUMlapack_dgehrd (n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &ierr);
	if (wantvl) {
		/* Want left eigenvectors. Copy Householder vectors to VL. */
		* (unsigned char *) side = 'L';
		NUMlapack_dlacpy ("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl);
		/* Generate orthogonal matrix in VL (Workspace: need 3*N-1, prefer
		   2*N+(N-1)*NB) */
		i__1 = *lwork - iwrk + 1;
		NUMlapack_dorghr (n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &i__1, &ierr);
		/* Perform QR iteration, accumulating Schur vectors in VL (Workspace:
		   need N+1, prefer N+HSWORK (see comments) ) */
		iwrk = itau;
		i__1 = *lwork - iwrk + 1;
		NUMlapack_dhseqr ("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vl[vl_offset], ldvl,
			&work[iwrk], &i__1, info);
		if (wantvr) {
			/* Want left and right eigenvectors. Copy Schur vectors to VR. */
			* (unsigned char *) side = 'B';
			NUMlapack_dlacpy ("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
		}
	} else if (wantvr) {
		/* Want right eigenvectors. Copy Householder vectors to VR. */
		* (unsigned char *) side = 'R';
		NUMlapack_dlacpy ("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr);
		/* Generate orthogonal matrix in VR (Workspace: need 3*N-1, prefer
		   2*N+(N-1)*NB) */
		i__1 = *lwork - iwrk + 1;
		NUMlapack_dorghr (n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &i__1, &ierr);
		/* Perform QR iteration, accumulating Schur vectors in VR (Workspace:
		   need N+1, prefer N+HSWORK (see comments) ) */
		iwrk = itau;
		i__1 = *lwork - iwrk + 1;
		NUMlapack_dhseqr ("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[vr_offset], ldvr,
			&work[iwrk], &i__1, info);
	} else {
		/* Compute eigenvalues only (Workspace: need N+1, prefer N+HSWORK
		   (see comments) ) */
		iwrk = itau;
		i__1 = *lwork - iwrk + 1;
		NUMlapack_dhseqr ("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &vr[vr_offset], ldvr,
			&work[iwrk], &i__1, info);
	}
	/* If INFO > 0 from NUMlapack_dhseqr , then quit */
	if (*info > 0) {
		goto L50;
	}
	if (wantvl || wantvr) {
		/* Compute left and/or right eigenvectors (Workspace: need 4*N) */
		NUMlapack_dtrevc (side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, &vr[vr_offset],
			ldvr, n, &nout, &work[iwrk], &ierr);
	}
	if (wantvl) {
		/* Undo balancing of left eigenvectors (Workspace: need N) */
		NUMlapack_dgebak ("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl, &ierr);
		/* Normalize left eigenvectors and make largest component real */
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (wi[i__] == 0.) {
				scl = 1. / NUMblas_dnrm2 (n, &vl_ref (1, i__), &c__1);
				NUMblas_dscal (n, &scl, &vl_ref (1, i__), &c__1);
			} else if (wi[i__] > 0.) {
				d__1 = NUMblas_dnrm2 (n, &vl_ref (1, i__), &c__1);
				d__2 = NUMblas_dnrm2 (n, &vl_ref (1, i__ + 1), &c__1);
				scl = 1. / NUMlapack_dlapy2 (&d__1, &d__2);
				NUMblas_dscal (n, &scl, &vl_ref (1, i__), &c__1);
				NUMblas_dscal (n, &scl, &vl_ref (1, i__ + 1), &c__1);
				i__2 = *n;
				for (k = 1; k <= i__2; ++k) {
					/* Computing 2nd power */
					d__1 = vl_ref (k, i__);
					/* Computing 2nd power */
					d__2 = vl_ref (k, i__ + 1);
					work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
					/* L10: */
				}
				k = NUMblas_idamax (n, &work[iwrk], &c__1);
				NUMlapack_dlartg (&vl_ref (k, i__), &vl_ref (k, i__ + 1), &cs, &sn, &r__);
				NUMblas_drot (n, &vl_ref (1, i__), &c__1, &vl_ref (1, i__ + 1), &c__1, &cs, &sn);
				vl_ref (k, i__ + 1) = 0.;
			}
			/* L20: */
		}
	}
	if (wantvr) {
		/* Undo balancing of right eigenvectors (Workspace: need N) */
		NUMlapack_dgebak ("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr, &ierr);
		/* Normalize right eigenvectors and make largest component real */
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (wi[i__] == 0.) {
				scl = 1. / NUMblas_dnrm2 (n, &vr_ref (1, i__), &c__1);
				NUMblas_dscal (n, &scl, &vr_ref (1, i__), &c__1);
			} else if (wi[i__] > 0.) {
				d__1 = NUMblas_dnrm2 (n, &vr_ref (1, i__), &c__1);
				d__2 = NUMblas_dnrm2 (n, &vr_ref (1, i__ + 1), &c__1);
				scl = 1. / NUMlapack_dlapy2 (&d__1, &d__2);
				NUMblas_dscal (n, &scl, &vr_ref (1, i__), &c__1);
				NUMblas_dscal (n, &scl, &vr_ref (1, i__ + 1), &c__1);
				i__2 = *n;
				for (k = 1; k <= i__2; ++k) {
					/* Computing 2nd power */
					d__1 = vr_ref (k, i__);
					/* Computing 2nd power */
					d__2 = vr_ref (k, i__ + 1);
					work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
					/* L30: */
				}
				k = NUMblas_idamax (n, &work[iwrk], &c__1);
				NUMlapack_dlartg (&vr_ref (k, i__), &vr_ref (k, i__ + 1), &cs, &sn, &r__);
				NUMblas_drot (n, &vr_ref (1, i__), &c__1, &vr_ref (1, i__ + 1), &c__1, &cs, &sn);
				vr_ref (k, i__ + 1) = 0.;
			}
			/* L40: */
		}
	}
	/* Undo scaling if necessary */
L50:
	if (scalea) {
		i__1 = *n - *info;
		/* Computing MAX */
		i__3 = *n - *info;
		i__2 = MAX (i__3, 1);
		NUMlapack_dlascl ("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info + 1], &i__2, &ierr);
		i__1 = *n - *info;
		/* Computing MAX */
		i__3 = *n - *info;
		i__2 = MAX (i__3, 1);
		NUMlapack_dlascl ("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info + 1], &i__2, &ierr);
		if (*info > 0) {
			i__1 = ilo - 1;
			NUMlapack_dlascl ("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1], n, &ierr);
			i__1 = ilo - 1;
			NUMlapack_dlascl ("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1], n, &ierr);
		}
	}
	work[1] = (double) maxwrk;
	return 0;
} /* NUMlapack_dgeev */

#undef vr_ref
#undef vl_ref
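/* Usage sketch for the driver above: query the workspace, then compute the
   eigenvalues and right eigenvectors of a 2x2 rotation generator (eigenvalues
   +i and -i). A complex conjugate pair comes back as wr[j] +/- i*wi[j], with
   the real and imaginary parts of the eigenvector packed into columns j and
   j+1 of vr. Values are illustrative; the block is compiled out. */
#if 0
static void example_dgeev (void) {
	integer n = 2, lda = 2, ldvl = 1, ldvr = 2, lwork, info = 0;
	double a[4] = { 0.0, -1.0, 1.0, 0.0 };   /* column-major [[0,1],[-1,0]] */
	double wr[2], wi[2], vr[4], vldum[1], wquery, work[16];
	lwork = -1;   /* workspace query */
	NUMlapack_dgeev ("N", "V", &n, a, &lda, wr, wi, vldum, &ldvl, vr, &ldvr, &wquery, &lwork, &info);
	lwork = 16;   /* >= minwrk (4*n); a real caller would allocate (integer) wquery doubles */
	NUMlapack_dgeev ("N", "V", &n, a, &lda, wr, wi, vldum, &ldvl, vr, &ldvr, work, &lwork, &info);
	/* info == 0: wr = {0, 0}, wi = {+1, -1} up to rounding */
}
#endif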
int NUMlapack_dgehd2 (integer *n, integer *ilo, integer *ihi, double *a, integer *lda, double *tau, double *work,
	integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3;
	/* Local variables */
	static integer i__;
	static double aii;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	--work;

	/* Function Body */
	*info = 0;
	if (*n < 0) {
		*info = -1;
	} else if (*ilo < 1 || *ilo > MAX (1, *n)) {
		*info = -2;
	} else if (*ihi < MIN (*ilo, *n) || *ihi > *n) {
		*info = -3;
	} else if (*lda < MAX (1, *n)) {
		*info = -5;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("NUMlapack_dgehd2 ", &i__1);
		return 0;
	}
	i__1 = *ihi - 1;
	for (i__ = *ilo; i__ <= i__1; ++i__) {
		/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i)
		   Computing MIN */
		i__2 = i__ + 2;
		i__3 = *ihi - i__;
		NUMlapack_dlarfg (&i__3, &a_ref (i__ + 1, i__), &a_ref (MIN (i__2, *n), i__), &c__1, &tau[i__]);
		aii = a_ref (i__ + 1, i__);
		a_ref (i__ + 1, i__) = 1.;
		/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
		i__2 = *ihi - i__;
		NUMlapack_dlarf ("Right", ihi, &i__2, &a_ref (i__ + 1, i__), &c__1, &tau[i__], &a_ref (1, i__ + 1),
			lda, &work[1]);
		/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
		i__2 = *ihi - i__;
		i__3 = *n - i__;
		NUMlapack_dlarf ("Left", &i__2, &i__3, &a_ref (i__ + 1, i__), &c__1, &tau[i__], &a_ref (i__ + 1,
			i__ + 1), lda, &work[1]);
		a_ref (i__ + 1, i__) = aii;
		/* L10: */
	}
	return 0;
} /* NUMlapack_dgehd2 */
int NUMlapack_dgehrd (integer *n, integer *ilo, integer *ihi, double *a, integer *lda, double *tau, double *work,
	integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__3 = 3;
	static integer c__2 = 2;
	static integer c__65 = 65;
	static double c_b25 = -1.;
	static double c_b26 = 1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
	/* Local variables */
	static integer i__;
	static double t[4160] /* was [65][64] */ ;
	static integer nbmin, iinfo;
	static integer ib;
	static double ei;
	static integer nb, nh;
	static integer nx;
	static integer ldwork, lwkopt;
	static int lquery;
	static integer iws;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	--work;

	/* Function Body */
	*info = 0;
	/* Computing MIN */
	i__1 = 64, i__2 = NUMlapack_ilaenv (&c__1, "NUMlapack_dgehrd ", " ", n, ilo, ihi, &c_n1, 6, 1);
	nb = MIN (i__1, i__2);
	lwkopt = *n * nb;
	work[1] = (double) lwkopt;
	lquery = *lwork == -1;
	if (*n < 0) {
		*info = -1;
	} else if (*ilo < 1 || *ilo > MAX (1, *n)) {
		*info = -2;
	} else if (*ihi < MIN (*ilo, *n) || *ihi > *n) {
		*info = -3;
	} else if (*lda < MAX (1, *n)) {
		*info = -5;
	} else if (*lwork < MAX (1, *n) && !lquery) {
		*info = -8;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("NUMlapack_dgehrd ", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
	i__1 = *ilo - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
		tau[i__] = 0.;
		/* L10: */
	}
	i__1 = *n - 1;
	for (i__ = MAX (1, *ihi); i__ <= i__1; ++i__) {
		tau[i__] = 0.;
		/* L20: */
	}
	/* Quick return if possible */
	nh = *ihi - *ilo + 1;
	if (nh <= 1) {
		work[1] = 1.;
		return 0;
	}
	/* Determine the block size.
	   Computing MIN */
	i__1 = 64, i__2 = NUMlapack_ilaenv (&c__1, "NUMlapack_dgehrd ", " ", n, ilo, ihi, &c_n1, 6, 1);
	nb = MIN (i__1, i__2);
	nbmin = 2;
	iws = 1;
	if (nb > 1 && nb < nh) {
		/* Determine when to cross over from blocked to unblocked code (last
		   block is always handled by unblocked code).
		   Computing MAX */
		i__1 = nb, i__2 = NUMlapack_ilaenv (&c__3, "NUMlapack_dgehrd ", " ", n, ilo, ihi, &c_n1, 6, 1);
		nx = MAX (i__1, i__2);
		if (nx < nh) {
			/* Determine if workspace is large enough for blocked code. */
			iws = *n * nb;
			if (*lwork < iws) {
				/* Not enough workspace to use optimal NB: determine the
				   minimum value of NB, and reduce NB or force use of
				   unblocked code.
				   Computing MAX */
				i__1 = 2, i__2 =
					NUMlapack_ilaenv (&c__2, "NUMlapack_dgehrd ", " ", n, ilo, ihi, &c_n1, 6, 1);
				nbmin = MAX (i__1, i__2);
				if (*lwork >= *n * nbmin) {
					nb = *lwork / *n;
				} else {
					nb = 1;
				}
			}
		}
	}
	ldwork = *n;
	if (nb < nbmin || nb >= nh) {
		/* Use unblocked code below */
		i__ = *ilo;
	} else {
		/* Use blocked code */
		i__1 = *ihi - 1 - nx;
		i__2 = nb;
		for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
			/* Computing MIN */
			i__3 = nb, i__4 = *ihi - i__;
			ib = MIN (i__3, i__4);
			/* Reduce columns i:i+ib-1 to Hessenberg form, returning the
			   matrices V and T of the block reflector H = I - V*T*V' which
			   performs the reduction, and also the matrix Y = A*V*T */
			NUMlapack_dlahrd (ihi, &i__, &ib, &a_ref (1, i__), lda, &tau[i__], t, &c__65, &work[1], &ldwork);
			/* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
			   right, computing A := A - Y * V'. V(i+ib,ib-1) should be set to
			   1. */
			ei = a_ref (i__ + ib, i__ + ib - 1);
			a_ref (i__ + ib, i__ + ib - 1) = 1.;
			i__3 = *ihi - i__ - ib + 1;
			NUMblas_dgemm ("No transpose", "Transpose", ihi, &i__3, &ib, &c_b25, &work[1], &ldwork,
				&a_ref (i__ + ib, i__), lda, &c_b26, &a_ref (1, i__ + ib), lda);
			a_ref (i__ + ib, i__ + ib - 1) = ei;
			/* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the left */
			i__3 = *ihi - i__;
			i__4 = *n - i__ - ib + 1;
			NUMlapack_dlarfb ("Left", "Transpose", "Forward", "Columnwise", &i__3, &i__4, &ib,
				&a_ref (i__ + 1, i__), lda, t, &c__65, &a_ref (i__ + 1, i__ + ib), lda, &work[1], &ldwork);
			/* L30: */
		}
	}
	/* Use unblocked code to reduce the rest of the matrix */
	NUMlapack_dgehd2 (n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
	work[1] = (double) iws;
	return 0;
} /* NUMlapack_dgehrd */
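/* Sketch: Hessenberg reduction as used by NUMlapack_dgeev above. On exit a is
   upper Hessenberg, with the elementary reflectors stored below the
   subdiagonal and their scalar factors in tau; the orthogonal Q can then be
   formed with NUMlapack_dorghr (exactly what dgeev does). Illustrative
   wrapper, compiled out. */
#if 0
static void example_dgehrd (integer n, double *a, integer lda, double *tau,
	double *work, integer lwork) {
	integer ilo = 1, ihi = n, info = 0;   /* no prior balancing: use the full range */
	NUMlapack_dgehrd (&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
}
#endif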
int NUMlapack_dgelq2 (integer *m, integer *n, double *a, integer *lda, double *tau, double *work, integer *info) {
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3;
	/* Local variables */
	static integer i__, k;
	static double aii;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	--work;

	/* Function Body */
	*info = 0;
	if (*m < 0) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *m)) {
		*info = -4;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DGELQ2", &i__1);
		return 0;
	}
	k = MIN (*m, *n);
	i__1 = k;
	for (i__ = 1; i__ <= i__1; ++i__) {
		/* Generate elementary reflector H(i) to annihilate A(i,i+1:n)
		   Computing MIN */
		i__2 = i__ + 1;
		i__3 = *n - i__ + 1;
		NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (i__, MIN (i__2, *n)), lda, &tau[i__]);
		if (i__ < *m) {
			/* Apply H(i) to A(i+1:m,i:n) from the right */
			aii = a_ref (i__, i__);
			a_ref (i__, i__) = 1.;
			i__2 = *m - i__;
			i__3 = *n - i__ + 1;
			NUMlapack_dlarf ("Right", &i__2, &i__3, &a_ref (i__, i__), lda, &tau[i__], &a_ref (i__ + 1, i__),
				lda, &work[1]);
			a_ref (i__, i__) = aii;
		}
		/* L10: */
	}
	return 0;
} /* NUMlapack_dgelq2 */
int NUMlapack_dgelqf (integer *m, integer *n, double *a, integer *lda, double *tau, double *work, integer *lwork,
	integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__3 = 3;
	static integer c__2 = 2;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
	/* Local variables */
	static integer i__, k, nbmin, iinfo;
	static integer ib, nb;
	static integer nx;
	static integer ldwork, lwkopt;
	static integer lquery;
	static integer iws;

	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	--work;

	/* Function Body */
	*info = 0;
	nb = NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
	lwkopt = *m * nb;
	work[1] = (double) lwkopt;
	lquery = *lwork == -1;
	if (*m < 0) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *m)) {
		*info = -4;
	} else if (*lwork < MAX (1, *m) && !lquery) {
		*info = -7;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DGELQF", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	k = MIN (*m, *n);
	if (k == 0) {
		work[1] = 1.;
		return 0;
	}
	nbmin = 2;
	nx = 0;
	iws = *m;
	if (nb > 1 && nb < k) {
		/* Determine when to cross over from blocked to unblocked code.
		   Computing MAX */
		i__1 = 0, i__2 = NUMlapack_ilaenv (&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
		nx = MAX (i__1, i__2);
		if (nx < k) {
			/* Determine if workspace is large enough for blocked code. */
			ldwork = *m;
			iws = ldwork * nb;
			if (*lwork < iws) {
				/* Not enough workspace to use optimal NB: reduce NB and
				   determine the minimum value of NB. */
				nb = *lwork / ldwork;
				/* Computing MAX */
				i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
				nbmin = MAX (i__1, i__2);
			}
		}
	}
	if (nb >= nbmin && nb < k && nx < k) {
		/* Use blocked code initially */
		i__1 = k - nx;
		i__2 = nb;
		for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
			/* Computing MIN */
			i__3 = k - i__ + 1;
			ib = MIN (i__3, nb);
			/* Compute the LQ factorization of the current block
			   A(i:i+ib-1,i:n) */
			i__3 = *n - i__ + 1;
			NUMlapack_dgelq2 (&ib, &i__3, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
			if (i__ + ib <= *m) {
				/* Form the triangular factor of the block reflector H = H(i)
				   H(i+1) . . . H(i+ib-1) */
				i__3 = *n - i__ + 1;
				NUMlapack_dlarft ("Forward", "Rowwise", &i__3, &ib, &a_ref (i__, i__), lda, &tau[i__], &work[1],
					&ldwork);
				/* Apply H to A(i+ib:m,i:n) from the right */
				i__3 = *m - i__ - ib + 1;
				i__4 = *n - i__ + 1;
				NUMlapack_dlarfb ("Right", "No transpose", "Forward", "Rowwise", &i__3, &i__4, &ib, &a_ref (i__,
					i__), lda, &work[1], &ldwork, &a_ref (i__ + ib, i__), lda, &work[ib + 1], &ldwork);
			}
			/* L10: */
		}
	} else {
		i__ = 1;
	}
	/* Use unblocked code to factor the last or only block. */
	if (i__ <= k) {
		i__2 = *m - i__ + 1;
		i__1 = *n - i__ + 1;
		NUMlapack_dgelq2 (&i__2, &i__1, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
	}
	work[1] = (double) iws;
	return 0;
} /* NUMlapack_dgelqf */
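/* Sketch: LQ factorization of a wide matrix, the building block of the
   underdetermined path in NUMlapack_dgelss below. On exit the lower triangle
   of a holds L and the reflectors defining Q sit to its right. Values are
   illustrative; the block is compiled out. */
#if 0
static void example_dgelqf (void) {
	integer m = 2, n = 3, lda = 2, lwork = 64, info = 0;
	double a[6] = { 1.0, 4.0, 2.0, 5.0, 3.0, 6.0 };   /* 2x3, column-major */
	double tau[2], work[64];   /* lwork >= m; 64 leaves room for blocking */
	NUMlapack_dgelqf (&m, &n, a, &lda, tau, work, &lwork, &info);
}
#endif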
  1852. #define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
  1853. int NUMlapack_dgelss (integer *m, integer *n, integer *nrhs, double *a, integer *lda, double *b, integer *ldb, double *s,
  1854. double *rcond, integer *rank, double *work, integer *lwork, integer *info) {
  1855. /* System generated locals */
  1856. integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
  1857. double d__1;
  1858. /* Local variables */
  1859. static double anrm, bnrm;
  1860. static integer itau;
  1861. static double vdum[1];
  1862. static integer i__;
  1863. static integer iascl, ibscl;
  1864. static integer chunk;
  1865. static double sfmin;
  1866. static integer minmn;
  1867. static integer maxmn, itaup, itauq, mnthr, iwork;
  1868. static integer bl, ie, il;
  1869. static integer mm;
  1870. static integer bdspac;
  1871. static double bignum;
  1872. static integer ldwork;
  1873. static integer minwrk, maxwrk;
  1874. static double smlnum;
  1875. static integer lquery;
  1876. static double eps, thr;
  1877. a_dim1 = *lda;
  1878. a_offset = 1 + a_dim1 * 1;
  1879. a -= a_offset;
  1880. b_dim1 = *ldb;
  1881. b_offset = 1 + b_dim1 * 1;
  1882. b -= b_offset;
  1883. --s;
  1884. --work;
  1885. /* Function Body */
  1886. *info = 0;
  1887. minmn = MIN (*m, *n);
  1888. maxmn = MAX (*m, *n);
  1889. mnthr = NUMlapack_ilaenv (&c__6, "DGELSS", " ", m, n, nrhs, &c_n1, 6, 1);
  1890. lquery = *lwork == -1;
  1891. if (*m < 0) {
  1892. *info = -1;
  1893. } else if (*n < 0) {
  1894. *info = -2;
  1895. } else if (*nrhs < 0) {
  1896. *info = -3;
  1897. } else if (*lda < MAX (1, *m)) {
  1898. *info = -5;
  1899. } else if (*ldb < MAX (1, maxmn)) {
  1900. *info = -7;
  1901. }
  1902. /* Compute workspace (Note: Comments in the code beginning "Workspace:"
  1903. describe the minimal amount of workspace needed at that point in the
  1904. code, as well as the preferred amount for good performance. NB refers
  1905. to the optimal block size for the immediately following subroutine, as
  1906. returned by ILAENV.) */
  1907. minwrk = 1;
  1908. if (*info == 0 && (*lwork >= 1 || lquery)) {
  1909. maxwrk = 0;
  1910. mm = *m;
  1911. if (*m >= *n && *m >= mnthr) {
  1912. /* Path 1a - overdetermined, with many more rows than columns */
  1913. mm = *n;
  1914. /* Computing MAX */
  1915. i__1 = maxwrk, i__2 = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
  1916. maxwrk = MAX (i__1, i__2);
  1917. /* Computing MAX */
  1918. i__1 = maxwrk, i__2 =
  1919. *n + *nrhs * NUMlapack_ilaenv (&c__1, "DORMQR", "LT", m, nrhs, n, &c_n1, 6, 2);
  1920. maxwrk = MAX (i__1, i__2);
  1921. }
  1922. if (*m >= *n) {
  1923. /* Path 1 - overdetermined or exactly determined
  1924. Compute workspace needed for DBDSQR
  1925. Computing MAX */
  1926. i__1 = 1, i__2 = *n * 5;
  1927. bdspac = MAX (i__1, i__2);
  1928. /* Computing MAX */
  1929. i__1 = maxwrk, i__2 =
  1930. *n * 3 + (mm + *n) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", &mm, n, &c_n1, &c_n1, 6, 1);
  1931. maxwrk = MAX (i__1, i__2);
  1932. /* Computing MAX */
  1933. i__1 = maxwrk, i__2 =
  1934. *n * 3 + *nrhs * NUMlapack_ilaenv (&c__1, "DORMBR", "QLT", &mm, nrhs, n, &c_n1, 6, 3);
  1935. maxwrk = MAX (i__1, i__2);
  1936. /* Computing MAX */
  1937. i__1 = maxwrk, i__2 =
  1938. *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
  1939. maxwrk = MAX (i__1, i__2);
  1940. maxwrk = MAX (maxwrk, bdspac);
  1941. /* Computing MAX */
  1942. i__1 = maxwrk, i__2 = *n * *nrhs;
  1943. maxwrk = MAX (i__1, i__2);
  1944. /* Computing MAX */
  1945. i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = MAX (i__1, i__2);
  1946. minwrk = MAX (i__1, bdspac);
  1947. maxwrk = MAX (minwrk, maxwrk);
  1948. }
  1949. if (*n > *m) {
  1950. /* Compute workspace needed for DBDSQR
  1951. Computing MAX */
  1952. i__1 = 1, i__2 = *m * 5;
  1953. bdspac = MAX (i__1, i__2);
  1954. /* Computing MAX */
  1955. i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *n, i__1 = MAX (i__1, i__2);
  1956. minwrk = MAX (i__1, bdspac);
  1957. if (*n >= mnthr) {
  1958. /* Path 2a - underdetermined, with many more columns than
  1959. rows */
  1960. maxwrk = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
  1961. /* Computing MAX */
  1962. i__1 = maxwrk, i__2 =
  1963. *m * *m + (*m << 2) + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1,
  1964. &c_n1, 6, 1);
  1965. maxwrk = MAX (i__1, i__2);
  1966. /* Computing MAX */
  1967. i__1 = maxwrk, i__2 =
  1968. *m * *m + (*m << 2) + *nrhs * NUMlapack_ilaenv (&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1,
  1969. 6, 3);
  1970. maxwrk = MAX (i__1, i__2);
  1971. /* Computing MAX */
  1972. i__1 = maxwrk, i__2 =
  1973. *m * *m + (*m << 2) + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6,
  1974. 1);
  1975. maxwrk = MAX (i__1, i__2);
  1976. /* Computing MAX */
  1977. i__1 = maxwrk, i__2 = *m * *m + *m + bdspac;
  1978. maxwrk = MAX (i__1, i__2);
  1979. if (*nrhs > 1) {
  1980. /* Computing MAX */
  1981. i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
  1982. maxwrk = MAX (i__1, i__2);
  1983. } else {
  1984. /* Computing MAX */
  1985. i__1 = maxwrk, i__2 = *m * *m + (*m << 1);
  1986. maxwrk = MAX (i__1, i__2);
  1987. }
  1988. /* Computing MAX */
  1989. i__1 = maxwrk, i__2 =
  1990. *m + *nrhs * NUMlapack_ilaenv (&c__1, "DORMLQ", "LT", n, nrhs, m, &c_n1, 6, 2);
  1991. maxwrk = MAX (i__1, i__2);
  1992. } else {
  1993. /* Path 2 - underdetermined */
  1994. maxwrk = *m * 3 + (*n + *m) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
  1995. /* Computing MAX */
  1996. i__1 = maxwrk, i__2 =
  1997. *m * 3 + *nrhs * NUMlapack_ilaenv (&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, 6, 3);
  1998. maxwrk = MAX (i__1, i__2);
  1999. /* Computing MAX */
  2000. i__1 = maxwrk, i__2 =
  2001. *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, n, m, &c_n1, 6, 1);
  2002. maxwrk = MAX (i__1, i__2);
  2003. maxwrk = MAX (maxwrk, bdspac);
  2004. /* Computing MAX */
  2005. i__1 = maxwrk, i__2 = *n * *nrhs;
  2006. maxwrk = MAX (i__1, i__2);
  2007. }
  2008. }
  2009. maxwrk = MAX (minwrk, maxwrk);
  2010. work[1] = (double) maxwrk;
  2011. }
  2012. minwrk = MAX (minwrk, 1);
  2013. if (*lwork < minwrk && !lquery) {
  2014. *info = -12;
  2015. }
  2016. if (*info != 0) {
  2017. i__1 = - (*info);
  2018. xerbla_ ("DGELSS", &i__1);
  2019. return 0;
  2020. } else if (lquery) {
  2021. return 0;
  2022. }
  2023. /* Quick return if possible */
  2024. if (*m == 0 || *n == 0) {
  2025. *rank = 0;
  2026. return 0;
  2027. }
  2028. /* Get machine parameters */
  2029. eps = NUMblas_dlamch ("P");
  2030. sfmin = NUMblas_dlamch ("S");
  2031. smlnum = sfmin / eps;
  2032. bignum = 1. / smlnum;
  2033. NUMlapack_dlabad (&smlnum, &bignum);
  2034. /* Scale A if max element outside range [SMLNUM,BIGNUM] */
  2035. anrm = NUMlapack_dlange ("M", m, n, &a[a_offset], lda, &work[1]);
  2036. iascl = 0;
  2037. if (anrm > 0. && anrm < smlnum) {
  2038. /* Scale matrix norm up to SMLNUM */
  2039. NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, info);
  2040. iascl = 1;
  2041. } else if (anrm > bignum) {
  2042. /* Scale matrix norm down to BIGNUM */
  2043. NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, info);
  2044. iascl = 2;
  2045. } else if (anrm == 0.) {
  2046. /* Matrix all zero. Return zero solution. */
  2047. i__1 = MAX (*m, *n);
  2048. NUMlapack_dlaset ("F", &i__1, nrhs, &c_b74, &c_b74, &b[b_offset], ldb);
  2049. NUMlapack_dlaset ("F", &minmn, &c__1, &c_b74, &c_b74, &s[1], &c__1);
  2050. *rank = 0;
  2051. goto L70;
  2052. }
  2053. /* Scale B if max element outside range [SMLNUM,BIGNUM] */
  2054. bnrm = NUMlapack_dlange ("M", m, nrhs, &b[b_offset], ldb, &work[1]);
  2055. ibscl = 0;
  2056. if (bnrm > 0. && bnrm < smlnum) {
  2057. /* Scale matrix norm up to SMLNUM */
  2058. NUMlapack_dlascl ("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb, info);
  2059. ibscl = 1;
  2060. } else if (bnrm > bignum) {
  2061. /* Scale matrix norm down to BIGNUM */
  2062. NUMlapack_dlascl ("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb, info);
  2063. ibscl = 2;
  2064. }
  2065. /* Overdetermined case */
  2066. if (*m >= *n) {
  2067. /* Path 1 - overdetermined or exactly determined */
  2068. mm = *m;
  2069. if (*m >= mnthr) {
  2070. /* Path 1a - overdetermined, with many more rows than columns */
  2071. mm = *n;
  2072. itau = 1;
  2073. iwork = itau + *n;
  2074. /* Compute A=Q*R (Workspace: need 2*N, prefer N+N*NB) */
  2075. i__1 = *lwork - iwork + 1;
  2076. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__1, info);
  2077. /* Multiply B by transpose(Q) (Workspace: need N+NRHS, prefer
  2078. N+NRHS*NB) */
  2079. i__1 = *lwork - iwork + 1;
  2080. NUMlapack_dormqr ("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[b_offset], ldb,
  2081. &work[iwork], &i__1, info);
  2082. /* Zero out below R */
  2083. if (*n > 1) {
  2084. i__1 = *n - 1;
  2085. i__2 = *n - 1;
  2086. NUMlapack_dlaset ("L", &i__1, &i__2, &c_b74, &c_b74, &a_ref (2, 1), lda);
  2087. }
  2088. }
  2089. ie = 1;
  2090. itauq = ie + *n;
  2091. itaup = itauq + *n;
  2092. iwork = itaup + *n;
  2093. /* Bidiagonalize R in A (Workspace: need 3*N+MM, prefer
  2094. 3*N+(MM+N)*NB) */
  2095. i__1 = *lwork - iwork + 1;
  2096. NUMlapack_dgebrd (&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup], &work[iwork],
  2097. &i__1, info);
  2098. /* Multiply B by transpose of left bidiagonalizing vectors of R
  2099. (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */
  2100. i__1 = *lwork - iwork + 1;
  2101. NUMlapack_dormbr ("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq], &b[b_offset], ldb,
  2102. &work[iwork], &i__1, info);
  2103. /* Generate right bidiagonalizing vectors of R in A (Workspace: need
  2104. 4*N-1, prefer 3*N+(N-1)*NB) */
  2105. i__1 = *lwork - iwork + 1;
  2106. NUMlapack_dorgbr ("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &i__1, info);
  2107. iwork = ie + *n;
  2108. /* Perform bidiagonal QR iteration multiply B by transpose of left
  2109. singular vectors compute right singular vectors in A (Workspace:
  2110. need BDSPAC) */
  2111. NUMlapack_dbdsqr ("U", n, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset], lda, vdum, &c__1,
  2112. &b[b_offset], ldb, &work[iwork], info);
  2113. if (*info != 0) {
  2114. goto L70;
  2115. }
  2116. /* Multiply B by reciprocals of singular values
  2117. Computing MAX */
  2118. d__1 = *rcond * s[1];
  2119. thr = MAX (d__1, sfmin);
  2120. if (*rcond < 0.) {
  2121. /* Computing MAX */
  2122. d__1 = eps * s[1];
  2123. thr = MAX (d__1, sfmin);
  2124. }
  2125. *rank = 0;
  2126. i__1 = *n;
  2127. for (i__ = 1; i__ <= i__1; ++i__) {
  2128. if (s[i__] > thr) {
  2129. NUMlapack_drscl (nrhs, &s[i__], &b_ref (i__, 1), ldb);
  2130. ++ (*rank);
  2131. } else {
  2132. NUMlapack_dlaset ("F", &c__1, nrhs, &c_b74, &c_b74, &b_ref (i__, 1), ldb);
  2133. }
  2134. /* L10: */
  2135. }
  2136. /* Multiply B by right singular vectors (Workspace: need N, prefer
  2137. N*NRHS) */
  2138. if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
  2139. NUMblas_dgemm ("T", "N", n, nrhs, n, &c_b108, &a[a_offset], lda, &b[b_offset], ldb, &c_b74, &work[1],
  2140. ldb);
  2141. NUMlapack_dlacpy ("G", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
  2142. } else if (*nrhs > 1) {
  2143. chunk = *lwork / *n;
  2144. i__1 = *nrhs;
  2145. i__2 = chunk;
  2146. for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
  2147. /* Computing MIN */
  2148. i__3 = *nrhs - i__ + 1;
  2149. bl = MIN (i__3, chunk);
  2150. NUMblas_dgemm ("T", "N", n, &bl, n, &c_b108, &a[a_offset], lda, &b_ref (1, i__), ldb, &c_b74,
  2151. &work[1], n);
  2152. NUMlapack_dlacpy ("G", n, &bl, &work[1], n, &b_ref (1, i__), ldb);
  2153. /* L20: */
  2154. }
  2155. } else {
  2156. NUMblas_dgemv ("T", n, n, &c_b108, &a[a_offset], lda, &b[b_offset], &c__1, &c_b74, &work[1], &c__1);
  2157. NUMblas_dcopy (n, &work[1], &c__1, &b[b_offset], &c__1);
  2158. }
  2159. } else { /* if(complicated condition) */
  2160. /* Computing MAX */
  2161. i__2 = *m, i__1 = (*m << 1) - 4, i__2 = MAX (i__2, i__1), i__2 = MAX (i__2, *nrhs), i__1 =
  2162. *n - *m * 3;
  2163. if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + MAX (i__2, i__1)) {
  2164. /* Path 2a - underdetermined, with many more columns than rows
  2165. and sufficient workspace for an efficient algorithm */
  2166. ldwork = *m;
  2167. /* Computing MAX Computing MAX */
  2168. i__3 = *m, i__4 = (*m << 1) - 4, i__3 = MAX (i__3, i__4), i__3 = MAX (i__3, *nrhs), i__4 =
  2169. *n - *m * 3;
  2170. i__2 = (*m << 2) + *m * *lda + MAX (i__3, i__4), i__1 = *m * *lda + *m + *m * *nrhs;
  2171. if (*lwork >= MAX (i__2, i__1)) {
  2172. ldwork = *lda;
  2173. }
  2174. itau = 1;
  2175. iwork = *m + 1;
  2176. /* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB) */
  2177. i__2 = *lwork - iwork + 1;
  2178. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, info);
  2179. il = iwork;
  2180. /* Copy L to WORK(IL), zeroing out above it */
  2181. NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
  2182. i__2 = *m - 1;
  2183. i__1 = *m - 1;
  2184. NUMlapack_dlaset ("U", &i__2, &i__1, &c_b74, &c_b74, &work[il + ldwork], &ldwork);
  2185. ie = il + ldwork * *m;
  2186. itauq = ie + *m;
  2187. itaup = itauq + *m;
  2188. iwork = itaup + *m;
  2189. /* Bidiagonalize L in WORK(IL) (Workspace: need M*M+5*M, prefer
  2190. M*M+4*M+2*M*NB) */
  2191. i__2 = *lwork - iwork + 1;
  2192. NUMlapack_dgebrd (m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq], &work[itaup],
  2193. &work[iwork], &i__2, info);
  2194. /* Multiply B by transpose of left bidiagonalizing vectors of L
  2195. (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */
  2196. i__2 = *lwork - iwork + 1;
  2197. NUMlapack_dormbr ("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[itauq], &b[b_offset], ldb,
  2198. &work[iwork], &i__2, info);
  2199. /* Generate right bidiagonalizing vectors of R in WORK(IL)
  2200. (Workspace: need M*M+5*M-1, prefer M*M+4*M+(M-1)*NB) */
  2201. i__2 = *lwork - iwork + 1;
  2202. NUMlapack_dorgbr ("P", m, m, m, &work[il], &ldwork, &work[itaup], &work[iwork], &i__2, info);
  2203. iwork = ie + *m;
  2204. /* Perform bidiagonal QR iteration, computing right singular
  2205. vectors of L in WORK(IL) and multiplying B by transpose of
  2206. left singular vectors (Workspace: need M*M+M+BDSPAC) */
  2207. NUMlapack_dbdsqr ("U", m, m, &c__0, nrhs, &s[1], &work[ie], &work[il], &ldwork, &a[a_offset], lda,
  2208. &b[b_offset], ldb, &work[iwork], info);
  2209. if (*info != 0) {
  2210. goto L70;
  2211. }
  2212. /* Multiply B by reciprocals of singular values
  2213. Computing MAX */
  2214. d__1 = *rcond * s[1];
  2215. thr = MAX (d__1, sfmin);
  2216. if (*rcond < 0.) {
  2217. /* Computing MAX */
  2218. d__1 = eps * s[1];
  2219. thr = MAX (d__1, sfmin);
  2220. }
  2221. *rank = 0;
  2222. i__2 = *m;
  2223. for (i__ = 1; i__ <= i__2; ++i__) {
  2224. if (s[i__] > thr) {
  2225. NUMlapack_drscl (nrhs, &s[i__], &b_ref (i__, 1), ldb);
  2226. ++ (*rank);
  2227. } else {
  2228. NUMlapack_dlaset ("F", &c__1, nrhs, &c_b74, &c_b74, &b_ref (i__, 1), ldb);
  2229. }
  2230. /* L30: */
  2231. }
  2232. iwork = ie;
  2233. /* Multiply B by right singular vectors of L in WORK(IL)
  2234. (Workspace: need M*M+2*M, prefer M*M+M+M*NRHS) */
  2235. if (*lwork >= *ldb * *nrhs + iwork - 1 && *nrhs > 1) {
  2236. NUMblas_dgemm ("T", "N", m, nrhs, m, &c_b108, &work[il], &ldwork, &b[b_offset], ldb, &c_b74,
  2237. &work[iwork], ldb);
  2238. NUMlapack_dlacpy ("G", m, nrhs, &work[iwork], ldb, &b[b_offset], ldb);
  2239. } else if (*nrhs > 1) {
  2240. chunk = (*lwork - iwork + 1) / *m;
  2241. i__2 = *nrhs;
  2242. i__1 = chunk;
  2243. for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
  2244. /* Computing MIN */
  2245. i__3 = *nrhs - i__ + 1;
  2246. bl = MIN (i__3, chunk);
  2247. NUMblas_dgemm ("T", "N", m, &bl, m, &c_b108, &work[il], &ldwork, &b_ref (1, i__), ldb, &c_b74,
  2248. &work[iwork], n);
  2249. NUMlapack_dlacpy ("G", m, &bl, &work[iwork], n, &b_ref (1, i__), ldb);
                    /* L40: */
                }
            } else {
                NUMblas_dgemv ("T", m, m, &c_b108, &work[il], &ldwork, &b_ref (1, 1), &c__1, &c_b74, &work[iwork],
                    &c__1);
                NUMblas_dcopy (m, &work[iwork], &c__1, &b_ref (1, 1), &c__1);
            }
            /* Zero out below first M rows of B */
            i__1 = *n - *m;
            NUMlapack_dlaset ("F", &i__1, nrhs, &c_b74, &c_b74, &b_ref (*m + 1, 1), ldb);
            iwork = itau + *m;
            /* Multiply transpose(Q) by B (Workspace: need M+NRHS, prefer
               M+NRHS*NB) */
            i__1 = *lwork - iwork + 1;
            NUMlapack_dormlq ("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[b_offset], ldb,
                &work[iwork], &i__1, info);
        } else {
            /* Path 2 - remaining underdetermined cases */
            ie = 1;
            itauq = ie + *m;
            itaup = itauq + *m;
            iwork = itaup + *m;
            /* Bidiagonalize A (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
            i__1 = *lwork - iwork + 1;
            NUMlapack_dgebrd (m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
                &work[iwork], &i__1, info);
            /* Multiply B by transpose of left bidiagonalizing vectors
               (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */
            i__1 = *lwork - iwork + 1;
            NUMlapack_dormbr ("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq], &b[b_offset], ldb,
                &work[iwork], &i__1, info);
            /* Generate right bidiagonalizing vectors in A (Workspace: need
               4*M, prefer 3*M+M*NB) */
            i__1 = *lwork - iwork + 1;
            NUMlapack_dorgbr ("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[iwork], &i__1, info);
            iwork = ie + *m;
            /* Perform bidiagonal QR iteration, computing right singular
               vectors of A in A and multiplying B by transpose of left
               singular vectors (Workspace: need BDSPAC) */
            NUMlapack_dbdsqr ("L", m, n, &c__0, nrhs, &s[1], &work[ie], &a[a_offset], lda, vdum, &c__1,
                &b[b_offset], ldb, &work[iwork], info);
            if (*info != 0) {
                goto L70;
            }
            /* Multiply B by reciprocals of singular values
               Computing MAX */
            d__1 = *rcond * s[1];
            thr = MAX (d__1, sfmin);
            if (*rcond < 0.) {
                /* Computing MAX */
                d__1 = eps * s[1];
                thr = MAX (d__1, sfmin);
            }
            *rank = 0;
            i__1 = *m;
            for (i__ = 1; i__ <= i__1; ++i__) {
                if (s[i__] > thr) {
                    NUMlapack_drscl (nrhs, &s[i__], &b_ref (i__, 1), ldb);
                    ++ (*rank);
                } else {
                    NUMlapack_dlaset ("F", &c__1, nrhs, &c_b74, &c_b74, &b_ref (i__, 1), ldb);
                }
                /* L50: */
            }
            /* Multiply B by right singular vectors of A (Workspace: need N,
               prefer N*NRHS) */
            if (*lwork >= *ldb * *nrhs && *nrhs > 1) {
                NUMblas_dgemm ("T", "N", n, nrhs, m, &c_b108, &a[a_offset], lda, &b[b_offset], ldb, &c_b74, &work[1],
                    ldb);
                NUMlapack_dlacpy ("F", n, nrhs, &work[1], ldb, &b[b_offset], ldb);
            } else if (*nrhs > 1) {
                chunk = *lwork / *n;
                i__1 = *nrhs;
                i__2 = chunk;
                for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
                    /* Computing MIN */
                    i__3 = *nrhs - i__ + 1;
                    bl = MIN (i__3, chunk);
                    NUMblas_dgemm ("T", "N", n, &bl, m, &c_b108, &a[a_offset], lda, &b_ref (1, i__), ldb, &c_b74,
                        &work[1], n);
                    NUMlapack_dlacpy ("F", n, &bl, &work[1], n, &b_ref (1, i__), ldb);
                    /* L60: */
                }
            } else {
                NUMblas_dgemv ("T", m, n, &c_b108, &a[a_offset], lda, &b[b_offset], &c__1, &c_b74, &work[1], &c__1);
                NUMblas_dcopy (n, &work[1], &c__1, &b[b_offset], &c__1);
            }
        }
    }
    /* Undo scaling */
    if (iascl == 1) {
        NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb, info);
        NUMlapack_dlascl ("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &minmn, info);
    } else if (iascl == 2) {
        NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb, info);
        NUMlapack_dlascl ("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &minmn, info);
    }
    if (ibscl == 1) {
        NUMlapack_dlascl ("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb, info);
    } else if (ibscl == 2) {
        NUMlapack_dlascl ("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb, info);
    }
L70:
    work[1] = (double) maxwrk;
    return 0;
} /* NUMlapack_dgelss */

#undef b_ref
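
/*
    Illustrative sketch (added; not part of the original CLAPACK translation):
    calling NUMlapack_dgelss to solve an overdetermined least-squares problem
    min || A x - b ||, assuming the standard DGELSS argument order
    (m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, info).
    Matrices are column-major throughout this file; rcond < 0 selects the
    machine-precision rank threshold, as the thresholding loops above show.
    All names and values below are hypothetical.
*/
#if 0
static void example_dgelss (void) {
    integer m = 4, n = 2, nrhs = 1, lda = 4, ldb = 4, rank, info;
    integer lwork = 3 * n + MAX (2 * n, MAX (m, nrhs));   /* documented minimum for m >= n */
    double a [8] = { 1, 1, 1, 1,   1, 2, 3, 4 };          /* 4 x 2, column-major */
    double b [4] = { 6, 5, 7, 10 };
    double s [2], rcond = -1.0, work [32];                /* 32 >= lwork here */
    NUMlapack_dgelss (&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank, work, &lwork, &info);
    /* On success, b [0..n-1] holds the least-squares solution and s the singular values. */
}
#endif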
int NUMlapack_dgeqpf (integer *m, integer *n, double *a, integer *lda, integer *jpvt, double *tau, double *work, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    double d__1, d__2;
    /* Local variables */
    static double temp;
    static double temp2;
    static integer i__, j;
    static integer itemp;
    static integer ma, mn;
    static double aii;
    static integer pvt;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *m)) {
        *info = -4;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DGEQPF", &i__1);
        return 0;
    }
    mn = MIN (*m, *n);
    /* Move initial columns up front */
    itemp = 1;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
        if (jpvt[i__] != 0) {
            if (i__ != itemp) {
                NUMblas_dswap (m, &a_ref (1, i__), &c__1, &a_ref (1, itemp), &c__1);
                jpvt[i__] = jpvt[itemp];
                jpvt[itemp] = i__;
            } else {
                jpvt[i__] = i__;
            }
            ++itemp;
        } else {
            jpvt[i__] = i__;
        }
        /* L10: */
    }
    --itemp;
    /* Compute the QR factorization and update remaining columns */
    if (itemp > 0) {
        ma = MIN (itemp, *m);
        NUMlapack_dgeqr2 (m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
        if (ma < *n) {
            i__1 = *n - ma;
            NUMlapack_dorm2r ("Left", "Transpose", m, &i__1, &ma, &a[a_offset], lda, &tau[1], &a_ref (1,
                ma + 1), lda, &work[1], info);
        }
    }
    if (itemp < mn) {
        /* Initialize partial column norms. The first n elements of work
           store the exact column norms. */
        i__1 = *n;
        for (i__ = itemp + 1; i__ <= i__1; ++i__) {
            i__2 = *m - itemp;
            work[i__] = NUMblas_dnrm2 (&i__2, &a_ref (itemp + 1, i__), &c__1);
            work[*n + i__] = work[i__];
            /* L20: */
        }
        /* Compute factorization */
        i__1 = mn;
        for (i__ = itemp + 1; i__ <= i__1; ++i__) {
            /* Determine ith pivot column and swap if necessary */
            i__2 = *n - i__ + 1;
            pvt = i__ - 1 + NUMblas_idamax (&i__2, &work[i__], &c__1);
            if (pvt != i__) {
                NUMblas_dswap (m, &a_ref (1, pvt), &c__1, &a_ref (1, i__), &c__1);
                itemp = jpvt[pvt];
                jpvt[pvt] = jpvt[i__];
                jpvt[i__] = itemp;
                work[pvt] = work[i__];
                work[*n + pvt] = work[*n + i__];
            }
            /* Generate elementary reflector H(i) */
            if (i__ < *m) {
                i__2 = *m - i__ + 1;
                NUMlapack_dlarfg (&i__2, &a_ref (i__, i__), &a_ref (i__ + 1, i__), &c__1, &tau[i__]);
            } else {
                NUMlapack_dlarfg (&c__1, &a_ref (*m, *m), &a_ref (*m, *m), &c__1, &tau[*m]);
            }
            if (i__ < *n) {
                /* Apply H(i) to A(i:m,i+1:n) from the left */
                aii = a_ref (i__, i__);
                a_ref (i__, i__) = 1.;
                i__2 = *m - i__ + 1;
                i__3 = *n - i__;
                NUMlapack_dlarf ("LEFT", &i__2, &i__3, &a_ref (i__, i__), &c__1, &tau[i__], &a_ref (i__,
                    i__ + 1), lda, &work[ (*n << 1) + 1]);
                a_ref (i__, i__) = aii;
            }
            /* Update partial column norms */
            i__2 = *n;
            for (j = i__ + 1; j <= i__2; ++j) {
                if (work[j] != 0.) {
                    /* Computing 2nd power */
                    d__2 = (d__1 = a_ref (i__, j), fabs (d__1)) / work[j];
                    temp = 1. - d__2 * d__2;
                    temp = MAX (temp, 0.);
                    /* Computing 2nd power */
                    d__1 = work[j] / work[*n + j];
                    temp2 = temp * .05 * (d__1 * d__1) + 1.;
                    if (temp2 == 1.) {
                        if (*m - i__ > 0) {
                            i__3 = *m - i__;
                            work[j] = NUMblas_dnrm2 (&i__3, &a_ref (i__ + 1, j), &c__1);
                            work[*n + j] = work[j];
                        } else {
                            work[j] = 0.;
                            work[*n + j] = 0.;
                        }
                    } else {
                        work[j] *= sqrt (temp);
                    }
                }
                /* L30: */
            }
            /* L40: */
        }
    }
    return 0;
} /* NUMlapack_dgeqpf */
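
/*
    Illustrative sketch (added; hypothetical usage, not in the original source):
    column-pivoted QR with NUMlapack_dgeqpf. Setting jpvt[j] = 0 marks a column
    as free, while a nonzero entry pins it to the front, as the permutation
    loop above implements. On return, the upper triangle of A holds R, the
    Householder vectors sit below the diagonal, and jpvt holds the permutation.
*/
#if 0
static void example_dgeqpf (void) {
    integer m = 3, n = 3, lda = 3, info;
    integer jpvt [3] = { 0, 0, 0 };                       /* all columns free */
    double a [9] = { 1, 4, 7,   2, 5, 8,   3, 6, 10 };    /* 3 x 3, column-major */
    double tau [3], work [9];                             /* work needs 3*n doubles */
    NUMlapack_dgeqpf (&m, &n, a, &lda, jpvt, tau, work, &info);
    /* A * P = Q * R, with P given by jpvt and Q encoded in (a, tau). */
}
#endif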
int NUMlapack_dgeqr2 (integer *m, integer *n, double *a, integer *lda, double *tau, double *work, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, k;
    static double aii;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *m)) {
        *info = -4;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DGEQR2", &i__1);
        return 0;
    }
    k = MIN (*m, *n);
    i__1 = k;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
           Computing MIN */
        i__2 = i__ + 1;
        i__3 = *m - i__ + 1;
        NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (MIN (i__2, *m), i__), &c__1, &tau[i__]);
        if (i__ < *n) {
            /* Apply H(i) to A(i:m,i+1:n) from the left */
            aii = a_ref (i__, i__);
            a_ref (i__, i__) = 1.;
            i__2 = *m - i__ + 1;
            i__3 = *n - i__;
            NUMlapack_dlarf ("Left", &i__2, &i__3, &a_ref (i__, i__), &c__1, &tau[i__], &a_ref (i__, i__ + 1),
                lda, &work[1]);
            a_ref (i__, i__) = aii;
        }
        /* L10: */
    }
    return 0;
} /* NUMlapack_dgeqr2 */
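
/*
    Note (added): each reflector generated above has the standard LAPACK form
    H(i) = I - tau[i] * v * v', where v[1..i-1] = 0, v[i] = 1, and v[i+1..m] is
    stored in A(i+1:m,i). Q = H(1)*H(2)*...*H(k) can be applied later with
    NUMlapack_dorm2r (as NUMlapack_dgeqpf does above) or formed explicitly with
    a DORGQR-style routine.
*/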
int NUMlapack_dgeqrf (integer *m, integer *n, double *a, integer *lda, double *tau, double *work, integer *lwork,
    integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    static integer c__3 = 3;
    static integer c__2 = 2;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
    /* Local variables */
    static integer i__, k, nbmin, iinfo;
    static integer ib, nb;
    static integer nx;
    static integer ldwork, lwkopt;
    static integer lquery;
    static integer iws;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    nb = NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
    lwkopt = *n * nb;
    work[1] = (double) lwkopt;
    lquery = *lwork == -1;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *m)) {
        *info = -4;
    } else if (*lwork < MAX (1, *n) && !lquery) {
        *info = -7;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DGEQRF", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    k = MIN (*m, *n);
    if (k == 0) {
        work[1] = 1.;
        return 0;
    }
    nbmin = 2;
    nx = 0;
    iws = *n;
    if (nb > 1 && nb < k) {
        /* Determine when to cross over from blocked to unblocked code.
           Computing MAX */
        i__1 = 0, i__2 = NUMlapack_ilaenv (&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
        nx = MAX (i__1, i__2);
        if (nx < k) {
            /* Determine if workspace is large enough for blocked code. */
            ldwork = *n;
            iws = ldwork * nb;
            if (*lwork < iws) {
                /* Not enough workspace to use optimal NB: reduce NB and
                   determine the minimum value of NB. */
                nb = *lwork / ldwork;
                /* Computing MAX */
                i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                nbmin = MAX (i__1, i__2);
            }
        }
    }
    if (nb >= nbmin && nb < k && nx < k) {
        /* Use blocked code initially */
        i__1 = k - nx;
        i__2 = nb;
        for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
            /* Computing MIN */
            i__3 = k - i__ + 1;
            ib = MIN (i__3, nb);
            /* Compute the QR factorization of the current block
               A(i:m,i:i+ib-1) */
            i__3 = *m - i__ + 1;
            NUMlapack_dgeqr2 (&i__3, &ib, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
            if (i__ + ib <= *n) {
                /* Form the triangular factor of the block reflector H = H(i)
                   H(i+1) . . . H(i+ib-1) */
                i__3 = *m - i__ + 1;
                NUMlapack_dlarft ("Forward", "Columnwise", &i__3, &ib, &a_ref (i__, i__), lda, &tau[i__],
                    &work[1], &ldwork);
                /* Apply H' to A(i:m,i+ib:n) from the left */
                i__3 = *m - i__ + 1;
                i__4 = *n - i__ - ib + 1;
                NUMlapack_dlarfb ("Left", "Transpose", "Forward", "Columnwise", &i__3, &i__4, &ib, &a_ref (i__,
                    i__), lda, &work[1], &ldwork, &a_ref (i__, i__ + ib), lda, &work[ib + 1], &ldwork);
            }
            /* L10: */
        }
    } else {
        i__ = 1;
    }
    /* Use unblocked code to factor the last or only block. */
    if (i__ <= k) {
        i__2 = *m - i__ + 1;
        i__1 = *n - i__ + 1;
        NUMlapack_dgeqr2 (&i__2, &i__1, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
    }
    work[1] = (double) iws;
    return 0;
} /* NUMlapack_dgeqrf */
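
/*
    Illustrative sketch (hypothetical, added for clarity): the blocked QR above
    supports the usual LAPACK workspace query. Passing lwork = -1 only writes
    the optimal workspace size into the caller's work[0] (work[1] in the
    1-based indexing used internally), after which the factorization can be
    run with that size. The malloc/free calls assume <cstdlib> is available.
*/
#if 0
static void example_dgeqrf_query (double *a, integer m, integer n, integer lda, double *tau) {
    integer info, lwork = -1;
    double query;
    NUMlapack_dgeqrf (&m, &n, a, &lda, tau, &query, &lwork, &info);   /* workspace query only */
    lwork = (integer) query;
    double *work = (double *) malloc (lwork * sizeof (double));
    NUMlapack_dgeqrf (&m, &n, a, &lda, tau, work, &lwork, &info);     /* actual factorization */
    free (work);
}
#endif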
int NUMlapack_dgerq2 (integer *m, integer *n, double *a, integer *lda, double *tau, double *work, integer *info) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    /* Local variables */
    static integer i__, k;
    static double aii;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *m)) {
        *info = -4;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DGERQ2", &i__1);
        return 0;
    }
    k = MIN (*m, *n);
    for (i__ = k; i__ >= 1; --i__) {
        /* Generate elementary reflector H(i) to annihilate
           A(m-k+i,1:n-k+i-1) */
        i__1 = *n - k + i__;
        NUMlapack_dlarfg (&i__1, &a_ref (*m - k + i__, *n - k + i__), &a_ref (*m - k + i__, 1), lda, &tau[i__]);
        /* Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */
        aii = a_ref (*m - k + i__, *n - k + i__);
        a_ref (*m - k + i__, *n - k + i__) = 1.;
        i__1 = *m - k + i__ - 1;
        i__2 = *n - k + i__;
        NUMlapack_dlarf ("Right", &i__1, &i__2, &a_ref (*m - k + i__, 1), lda, &tau[i__], &a[a_offset], lda,
            &work[1]);
        a_ref (*m - k + i__, *n - k + i__) = aii;
        /* L10: */
    }
    return 0;
} /* NUMlapack_dgerq2 */
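
/*
    Note (added): NUMlapack_dgerq2 computes the unblocked RQ factorization
    A = R * Q. On exit the upper triangle of A(1:m, n-m+1:n) holds R, and the
    reflector H(i) is stored in row m-k+i of A with its scalar factor in
    tau[i], mirroring the QR storage convention of NUMlapack_dgeqr2 above but
    built from the last row upward.
*/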
int NUMlapack_dgesv (integer *n, integer *nrhs, double *a, integer *lda, integer *ipiv, double *b, integer *ldb, integer *info) {
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, i__1;
    /* Local variables */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    /* Function Body */
    *info = 0;
    if (*n < 0) {
        *info = -1;
    } else if (*nrhs < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *n)) {
        *info = -4;
    } else if (*ldb < MAX (1, *n)) {
        *info = -7;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DGESV ", &i__1);
        return 0;
    }
    /* Compute the LU factorization of A. */
    NUMlapack_dgetrf (n, n, &a[a_offset], lda, &ipiv[1], info);
    if (*info == 0) {
        /* Solve the system A*X = B, overwriting B with X. */
        NUMlapack_dgetrs ("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[b_offset], ldb, info);
    }
    return 0;
} /* NUMlapack_dgesv */
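
/*
    Illustrative sketch (hypothetical values, added): NUMlapack_dgesv is the
    driver combining the LU factorization (NUMlapack_dgetrf) with the
    triangular solves (NUMlapack_dgetrs), as the two calls above show.
*/
#if 0
static void example_dgesv (void) {
    integer n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv [2], info;
    double a [4] = { 3, 1,   1, 2 };   /* column-major: rows (3, 1) and (1, 2) */
    double b [2] = { 9, 8 };
    NUMlapack_dgesv (&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
    /* info == 0: b now holds the solution x = (2, 3); a holds the LU factors. */
}
#endif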
#define u_ref(a_1,a_2) u[(a_2)*u_dim1 + a_1]
#define vt_ref(a_1,a_2) vt[(a_2)*vt_dim1 + a_1]

int NUMlapack_dgesvd (const char *jobu, const char *jobvt, integer *m, integer *n, double *a, integer *lda, double *s, double *u,
    integer *ldu, double *vt, integer *ldvt, double *work, integer *lwork, integer *info) {
    /* System generated locals */
    const char *a__1[2];
    integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1[2], i__2, i__3, i__4;
    char ch__1[2];
    /* Local variables */
    static integer iscl;
    static double anrm;
    static integer ierr, itau, ncvt, nrvt, i__;
    static integer chunk, minmn, wrkbl, itaup, itauq, mnthr, iwork;
    static integer wntua, wntva, wntun, wntuo, wntvn, wntvo, wntus, wntvs;
    static integer ie;
    static integer ir, bdspac, iu;
    static double bignum;
    static integer ldwrkr, minwrk, ldwrku, maxwrk;
    static double smlnum;
    static integer lquery, wntuas, wntvas;
    static integer blk, ncu;
    static double dum[1], eps;
    static integer nru;
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --s;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1 * 1;
    u -= u_offset;
    vt_dim1 = *ldvt;
    vt_offset = 1 + vt_dim1 * 1;
    vt -= vt_offset;
    --work;
    /* Function Body */
    *info = 0;
    minmn = MIN (*m, *n);
    /* Writing concatenation */
    i__1[0] = 1, a__1[0] = jobu;
    i__1[1] = 1, a__1[1] = jobvt;
    s_cat (ch__1, a__1, i__1, &c__2, 2);
    mnthr = NUMlapack_ilaenv (&c__6, "DGESVD", ch__1, m, n, &c__0, &c__0, 6, 2);
    wntua = lsame_ (jobu, "A");
    wntus = lsame_ (jobu, "S");
    wntuas = wntua || wntus;
    wntuo = lsame_ (jobu, "O");
    wntun = lsame_ (jobu, "N");
    wntva = lsame_ (jobvt, "A");
    wntvs = lsame_ (jobvt, "S");
    wntvas = wntva || wntvs;
    wntvo = lsame_ (jobvt, "O");
    wntvn = lsame_ (jobvt, "N");
    minwrk = 1;
    lquery = *lwork == -1;
    if (! (wntua || wntus || wntuo || wntun)) {
        *info = -1;
    } else if (! (wntva || wntvs || wntvo || wntvn) || (wntvo && wntuo)) {
        *info = -2;
    } else if (*m < 0) {
        *info = -3;
    } else if (*n < 0) {
        *info = -4;
    } else if (*lda < MAX (1, *m)) {
        *info = -6;
    } else if (*ldu < 1 || (wntuas && *ldu < *m)) {
        *info = -9;
    } else if (*ldvt < 1 || (wntva && *ldvt < *n) || (wntvs && *ldvt < minmn)) {
        *info = -11;
    }
    /* Compute workspace (Note: Comments in the code beginning "Workspace:"
       describe the minimal amount of workspace needed at that point in the
       code, as well as the preferred amount for good performance. NB refers
       to the optimal block size for the immediately following subroutine, as
       returned by ILAENV.) */
    if (*info == 0 && (*lwork >= 1 || lquery) && *m > 0 && *n > 0) {
        if (*m >= *n) {
            /* Compute space needed for DBDSQR */
            bdspac = *n * 5;
            if (*m >= mnthr) {
                if (wntun) {
                    /* Path 1 (M much larger than N, JOBU='N') */
                    maxwrk = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                    if (wntvo || wntvas) {
                        /* Computing MAX */
                        i__2 = maxwrk, i__3 =
                            *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                        maxwrk = MAX (i__2, i__3);
                    }
                    maxwrk = MAX (maxwrk, bdspac);
                    /* Computing MAX */
                    i__2 = *n << 2;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntuo && wntvn) {
                    /* Path 2 (M much larger than N, JOBU='O', JOBVT='N') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *n * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    /* Computing MAX */
                    i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
                    maxwrk = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntuo && wntvas) {
                    /* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or
                       'A') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *n * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    /* Computing MAX */
                    i__2 = *n * *n + wrkbl, i__3 = *n * *n + *m * *n + *n;
                    maxwrk = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntus && wntvn) {
                    /* Path 4 (M much larger than N, JOBU='S', JOBVT='N') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *n * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *n * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntus && wntvo) {
                    /* Path 5 (M much larger than N, JOBU='S', JOBVT='O') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *n * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = (*n << 1) * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntus && wntvas) {
                    /* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or
                       'A') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *n * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *n * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntua && wntvn) {
                    /* Path 7 (M much larger than N, JOBU='A', JOBVT='N') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *m * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, m, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *n * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntua && wntvo) {
                    /* Path 8 (M much larger than N, JOBU='A', JOBVT='O') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *m * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, m, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = (*n << 1) * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntua && wntvas) {
                    /* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or
                       'A') */
                    wrkbl = *n + *n * NUMlapack_ilaenv (&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n + *m * NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, m, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *n * *n + wrkbl;
                    /* Computing MAX */
                    i__2 = *n * 3 + *m;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                }
            } else {
                /* Path 10 (M at least N, but not much larger) */
                maxwrk = *n * 3 + (*m + *n) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
                if (wntus || wntuo) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *n * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, n, n, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                if (wntua) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *n * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, n, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                if (!wntvn) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *n * 3 + (*n - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, n, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                maxwrk = MAX (maxwrk, bdspac);
                /* Computing MAX */
                i__2 = *n * 3 + *m;
                minwrk = MAX (i__2, bdspac);
                maxwrk = MAX (maxwrk, minwrk);
            }
        } else {
            /* Compute space needed for DBDSQR */
            bdspac = *m * 5;
            if (*n >= mnthr) {
                if (wntvn) {
                    /* Path 1t(N much larger than M, JOBVT='N') */
                    maxwrk = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                    if (wntuo || wntuas) {
                        /* Computing MAX */
                        i__2 = maxwrk, i__3 =
                            *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                        maxwrk = MAX (i__2, i__3);
                    }
                    maxwrk = MAX (maxwrk, bdspac);
                    /* Computing MAX */
                    i__2 = *m << 2;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntvo && wntun) {
                    /* Path 2t(N much larger than M, JOBU='N', JOBVT='O') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *m * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    /* Computing MAX */
                    i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
                    maxwrk = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntvo && wntuas) {
                    /* Path 3t(N much larger than M, JOBU='S' or 'A',
                       JOBVT='O') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *m * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    /* Computing MAX */
                    i__2 = *m * *m + wrkbl, i__3 = *m * *m + *m * *n + *m;
                    maxwrk = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntvs && wntun) {
                    /* Path 4t(N much larger than M, JOBU='N', JOBVT='S') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *m * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *m * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntvs && wntuo) {
                    /* Path 5t(N much larger than M, JOBU='O', JOBVT='S') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *m * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = (*m << 1) * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntvs && wntuas) {
                    /* Path 6t(N much larger than M, JOBU='S' or 'A',
                       JOBVT='S') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *m * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *m * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntva && wntun) {
                    /* Path 7t(N much larger than M, JOBU='N', JOBVT='A') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *n * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", n, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *m * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntva && wntuo) {
                    /* Path 8t(N much larger than M, JOBU='O', JOBVT='A') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *n * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", n, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = (*m << 1) * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                } else if (wntva && wntuas) {
                    /* Path 9t(N much larger than M, JOBU='S' or 'A',
                       JOBVT='A') */
                    wrkbl = *m + *m * NUMlapack_ilaenv (&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, 6, 1);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m + *n * NUMlapack_ilaenv (&c__1, "DORGLQ", " ", n, n, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m << 1) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    /* Computing MAX */
                    i__2 = wrkbl, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    wrkbl = MAX (i__2, i__3);
                    wrkbl = MAX (wrkbl, bdspac);
                    maxwrk = *m * *m + wrkbl;
                    /* Computing MAX */
                    i__2 = *m * 3 + *n;
                    minwrk = MAX (i__2, bdspac);
                    maxwrk = MAX (maxwrk, minwrk);
                }
            } else {
                /* Path 10t(N greater than M, but not much larger) */
                maxwrk = *m * 3 + (*m + *n) * NUMlapack_ilaenv (&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, 6, 1);
                if (wntvs || wntvo) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *m * 3 + *m * NUMlapack_ilaenv (&c__1, "DORGBR", "P", m, n, m, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                if (wntva) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *m * 3 + *n * NUMlapack_ilaenv (&c__1, "DORGBR", "P", n, n, m, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                if (!wntun) {
                    /* Computing MAX */
                    i__2 = maxwrk, i__3 =
                        *m * 3 + (*m - 1) * NUMlapack_ilaenv (&c__1, "DORGBR", "Q", m, m, m, &c_n1, 6, 1);
                    maxwrk = MAX (i__2, i__3);
                }
                maxwrk = MAX (maxwrk, bdspac);
                /* Computing MAX */
                i__2 = *m * 3 + *n;
                minwrk = MAX (i__2, bdspac);
                maxwrk = MAX (maxwrk, minwrk);
            }
        }
        work[1] = (double) maxwrk;
    }
    if (*lwork < minwrk && !lquery) {
        *info = -13;
    }
    if (*info != 0) {
        i__2 = - (*info);
        xerbla_ ("DGESVD", &i__2);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*m == 0 || *n == 0) {
        if (*lwork >= 1) {
            work[1] = 1.;
        }
        return 0;
    }
    /* Get machine constants */
    eps = NUMblas_dlamch ("P");
    smlnum = sqrt (NUMblas_dlamch ("S")) / eps;
    bignum = 1. / smlnum;
    /* Scale A if max element outside range [SMLNUM,BIGNUM] */
    anrm = NUMlapack_dlange ("M", m, n, &a[a_offset], lda, dum);
    iscl = 0;
    if (anrm > 0. && anrm < smlnum) {
        iscl = 1;
        NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &ierr);
    } else if (anrm > bignum) {
        iscl = 1;
        NUMlapack_dlascl ("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &ierr);
    }
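    /*
        Note (added): the scaling above keeps the largest element of A inside
        [smlnum, bignum] so that the bidiagonal QR iteration is protected from
        overflow and harmful underflow; when iscl == 1, the computed singular
        values are rescaled by the inverse factor before the routine returns.
    */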
  3329. if (*m >= *n) {
  3330. /* A has at least as many rows as columns. If A has sufficiently more
  3331. rows than columns, first reduce using the QR decomposition (if
  3332. sufficient workspace available) */
  3333. if (*m >= mnthr) {
  3334. if (wntun) {
  3335. /* Path 1 (M much larger than N, JOBU='N') No left singular
  3336. vectors to be computed */
  3337. itau = 1;
  3338. iwork = itau + *n;
  3339. /* Compute A=Q*R (Workspace: need 2*N, prefer N+N*NB) */
  3340. i__2 = *lwork - iwork + 1;
  3341. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3342. /* Zero out below R */
  3343. i__2 = *n - 1;
  3344. i__3 = *n - 1;
  3345. NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &a_ref (2, 1), lda);
  3346. ie = 1;
  3347. itauq = ie + *n;
  3348. itaup = itauq + *n;
  3349. iwork = itaup + *n;
  3350. /* Bidiagonalize R in A (Workspace: need 4*N, prefer
  3351. 3*N+2*N*NB) */
  3352. i__2 = *lwork - iwork + 1;
  3353. NUMlapack_dgebrd (n, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
  3354. &work[iwork], &i__2, &ierr);
  3355. ncvt = 0;
  3356. if (wntvo || wntvas) {
  3357. /* If right singular vectors desired, generate P'.
  3358. (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
  3359. i__2 = *lwork - iwork + 1;
  3360. NUMlapack_dorgbr ("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &i__2,
  3361. &ierr);
  3362. ncvt = *n;
  3363. }
  3364. iwork = ie + *n;
  3365. /* Perform bidiagonal QR iteration, computing right singular
  3366. vectors of A in A if desired (Workspace: need BDSPAC) */
  3367. NUMlapack_dbdsqr ("U", n, &ncvt, &c__0, &c__0, &s[1], &work[ie], &a[a_offset], lda, dum, &c__1,
  3368. dum, &c__1, &work[iwork], info);
  3369. /* If right singular vectors desired in VT, copy them there */
  3370. if (wntvas) {
  3371. NUMlapack_dlacpy ("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  3372. }
  3373. } else if (wntuo && wntvn) {
  3374. /* Path 2 (M much larger than N, JOBU='O', JOBVT='N') N left
  3375. singular vectors to be overwritten on A and no right
  3376. singular vectors to be computed
  3377. Computing MAX */
  3378. i__2 = *n << 2;
  3379. if (*lwork >= *n * *n + MAX (i__2, bdspac)) {
  3380. /* Sufficient workspace for a fast algorithm */
  3381. ir = 1;
  3382. /* Computing MAX */
  3383. i__2 = wrkbl, i__3 = *lda * *n + *n;
  3384. if (*lwork >= MAX (i__2, i__3) + *lda * *n) {
  3385. /* WORK(IU) is LDA by N, WORK(IR) is LDA by N */
  3386. ldwrku = *lda;
  3387. ldwrkr = *lda;
  3388. } else { /* if(complicated condition) */
  3389. /* Computing MAX */
  3390. i__2 = wrkbl, i__3 = *lda * *n + *n;
  3391. if (*lwork >= MAX (i__2, i__3) + *n * *n) {
  3392. /* WORK(IU) is LDA by N, WORK(IR) is N by N */
  3393. ldwrku = *lda;
  3394. ldwrkr = *n;
  3395. } else {
  3396. /* WORK(IU) is LDWRKU by N, WORK(IR) is N by N */
  3397. ldwrku = (*lwork - *n * *n - *n) / *n;
  3398. ldwrkr = *n;
  3399. }
  3400. }
  3401. itau = ir + ldwrkr * *n;
  3402. iwork = itau + *n;
  3403. /* Compute A=Q*R (Workspace: need N*N+2*N, prefer
  3404. N*N+N+N*NB) */
  3405. i__2 = *lwork - iwork + 1;
  3406. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3407. /* Copy R to WORK(IR) and zero out below it */
  3408. NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
  3409. i__2 = *n - 1;
  3410. i__3 = *n - 1;
  3411. NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[ir + 1], &ldwrkr);
  3412. /* Generate Q in A (Workspace: need N*N+2*N, prefer
  3413. N*N+N+N*NB) */
  3414. i__2 = *lwork - iwork + 1;
  3415. NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3416. ie = itau;
  3417. itauq = ie + *n;
  3418. itaup = itauq + *n;
  3419. iwork = itaup + *n;
  3420. /* Bidiagonalize R in WORK(IR) (Workspace: need N*N+4*N,
  3421. prefer N*N+3*N+2*N*NB) */
  3422. i__2 = *lwork - iwork + 1;
  3423. NUMlapack_dgebrd (n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq], &work[itaup],
  3424. &work[iwork], &i__2, &ierr);
  3425. /* Generate left vectors bidiagonalizing R (Workspace:
  3426. need N*N+4*N, prefer N*N+3*N+N*NB) */
  3427. i__2 = *lwork - iwork + 1;
  3428. NUMlapack_dorgbr ("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__2,
  3429. &ierr);
  3430. iwork = ie + *n;
  3431. /* Perform bidiagonal QR iteration, computing left
  3432. singular vectors of R in WORK(IR) (Workspace: need
  3433. N*N+BDSPAC) */
  3434. NUMlapack_dbdsqr ("U", n, &c__0, n, &c__0, &s[1], &work[ie], dum, &c__1, &work[ir], &ldwrkr,
  3435. dum, &c__1, &work[iwork], info);
  3436. iu = ie + *n;
  3437. /* Multiply Q in A by left singular vectors of R in
  3438. WORK(IR), storing result in WORK(IU) and copying to A
  3439. (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
  3440. i__2 = *m;
  3441. i__3 = ldwrku;
  3442. for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) {
  3443. /* Computing MIN */
  3444. i__4 = *m - i__ + 1;
  3445. chunk = MIN (i__4, ldwrku);
  3446. NUMblas_dgemm ("N", "N", &chunk, n, n, &c_b438, &a_ref (i__, 1), lda, &work[ir], &ldwrkr,
  3447. &c_b416, &work[iu], &ldwrku);
  3448. NUMlapack_dlacpy ("F", &chunk, n, &work[iu], &ldwrku, &a_ref (i__, 1), lda);
  3449. /* L10: */
  3450. }
  3451. } else {
  3452. /* Insufficient workspace for a fast algorithm */
  3453. ie = 1;
  3454. itauq = ie + *n;
  3455. itaup = itauq + *n;
  3456. iwork = itaup + *n;
  3457. /* Bidiagonalize A (Workspace: need 3*N+M, prefer
  3458. 3*N+(M+N)*NB) */
  3459. i__3 = *lwork - iwork + 1;
  3460. NUMlapack_dgebrd (m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
  3461. &work[iwork], &i__3, &ierr);
  3462. /* Generate left vectors bidiagonalizing A (Workspace:
  3463. need 4*N, prefer 3*N+N*NB) */
  3464. i__3 = *lwork - iwork + 1;
  3465. NUMlapack_dorgbr ("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[iwork], &i__3,
  3466. &ierr);
  3467. iwork = ie + *n;
  3468. /* Perform bidiagonal QR iteration, computing left
  3469. singular vectors of A in A (Workspace: need BDSPAC) */
  3470. NUMlapack_dbdsqr ("U", n, &c__0, m, &c__0, &s[1], &work[ie], dum, &c__1, &a[a_offset], lda,
  3471. dum, &c__1, &work[iwork], info);
  3472. }
  3473. } else if (wntuo && wntvas) {
  3474. /* Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A')
  3475. N left singular vectors to be overwritten on A and N right
  3476. singular vectors to be computed in VT
  3477. Computing MAX */
  3478. i__3 = *n << 2;
  3479. if (*lwork >= *n * *n + MAX (i__3, bdspac)) {
  3480. /* Sufficient workspace for a fast algorithm */
  3481. ir = 1;
  3482. /* Computing MAX */
  3483. i__3 = wrkbl, i__2 = *lda * *n + *n;
  3484. if (*lwork >= MAX (i__3, i__2) + *lda * *n) {
  3485. /* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
  3486. ldwrku = *lda;
  3487. ldwrkr = *lda;
  3488. } else { /* if(complicated condition) */
  3489. /* Computing MAX */
  3490. i__3 = wrkbl, i__2 = *lda * *n + *n;
  3491. if (*lwork >= MAX (i__3, i__2) + *n * *n) {
  3492. /* WORK(IU) is LDA by N and WORK(IR) is N by N */
  3493. ldwrku = *lda;
  3494. ldwrkr = *n;
  3495. } else {
  3496. /* WORK(IU) is LDWRKU by N and WORK(IR) is N by N
  3497. */
  3498. ldwrku = (*lwork - *n * *n - *n) / *n;
  3499. ldwrkr = *n;
  3500. }
  3501. }
  3502. itau = ir + ldwrkr * *n;
  3503. iwork = itau + *n;
  3504. /* Compute A=Q*R (Workspace: need N*N+2*N, prefer
  3505. N*N+N+N*NB) */
  3506. i__3 = *lwork - iwork + 1;
  3507. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__3, &ierr);
  3508. /* Copy R to VT, zeroing out below it */
  3509. NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  3510. i__3 = *n - 1;
  3511. i__2 = *n - 1;
  3512. NUMlapack_dlaset ("L", &i__3, &i__2, &c_b416, &c_b416, &vt_ref (2, 1), ldvt);
  3513. /* Generate Q in A (Workspace: need N*N+2*N, prefer
  3514. N*N+N+N*NB) */
  3515. i__3 = *lwork - iwork + 1;
  3516. NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__3, &ierr);
  3517. ie = itau;
  3518. itauq = ie + *n;
  3519. itaup = itauq + *n;
  3520. iwork = itaup + *n;
  3521. /* Bidiagonalize R in VT, copying result to WORK(IR)
  3522. (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
  3523. i__3 = *lwork - iwork + 1;
  3524. NUMlapack_dgebrd (n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &work[itauq], &work[itaup],
  3525. &work[iwork], &i__3, &ierr);
  3526. NUMlapack_dlacpy ("L", n, n, &vt[vt_offset], ldvt, &work[ir], &ldwrkr);
  3527. /* Generate left vectors bidiagonalizing R in WORK(IR)
  3528. (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
  3529. i__3 = *lwork - iwork + 1;
  3530. NUMlapack_dorgbr ("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__3,
  3531. &ierr);
  3532. /* Generate right vectors bidiagonalizing R in VT
  3533. (Workspace: need N*N+4*N-1, prefer N*N+3*N+(N-1)*NB) */
  3534. i__3 = *lwork - iwork + 1;
  3535. NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__3,
  3536. &ierr);
  3537. iwork = ie + *n;
  3538. /* Perform bidiagonal QR iteration, computing left
  3539. singular vectors of R in WORK(IR) and computing right
  3540. singular vectors of R in VT (Workspace: need
  3541. N*N+BDSPAC) */
  3542. NUMlapack_dbdsqr ("U", n, n, n, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt, &work[ir],
  3543. &ldwrkr, dum, &c__1, &work[iwork], info);
  3544. iu = ie + *n;
  3545. /* Multiply Q in A by left singular vectors of R in
  3546. WORK(IR), storing result in WORK(IU) and copying to A
  3547. (Workspace: need N*N+2*N, prefer N*N+M*N+N) */
  3548. i__3 = *m;
  3549. i__2 = ldwrku;
  3550. for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ += i__2) {
  3551. /* Computing MIN */
  3552. i__4 = *m - i__ + 1;
  3553. chunk = MIN (i__4, ldwrku);
  3554. NUMblas_dgemm ("N", "N", &chunk, n, n, &c_b438, &a_ref (i__, 1), lda, &work[ir], &ldwrkr,
  3555. &c_b416, &work[iu], &ldwrku);
  3556. NUMlapack_dlacpy ("F", &chunk, n, &work[iu], &ldwrku, &a_ref (i__, 1), lda);
  3557. /* L20: */
  3558. }
  3559. } else {
  3560. /* Insufficient workspace for a fast algorithm */
  3561. itau = 1;
  3562. iwork = itau + *n;
  3563. /* Compute A=Q*R (Workspace: need 2*N, prefer N+N*NB) */
  3564. i__2 = *lwork - iwork + 1;
  3565. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3566. /* Copy R to VT, zeroing out below it */
  3567. NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  3568. i__2 = *n - 1;
  3569. i__3 = *n - 1;
  3570. NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &vt_ref (2, 1), ldvt);
  3571. /* Generate Q in A (Workspace: need 2*N, prefer N+N*NB) */
  3572. i__2 = *lwork - iwork + 1;
  3573. NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3574. ie = itau;
  3575. itauq = ie + *n;
  3576. itaup = itauq + *n;
  3577. iwork = itaup + *n;
  3578. /* Bidiagonalize R in VT (Workspace: need 4*N, prefer
  3579. 3*N+2*N*NB) */
  3580. i__2 = *lwork - iwork + 1;
  3581. NUMlapack_dgebrd (n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &work[itauq], &work[itaup],
  3582. &work[iwork], &i__2, &ierr);
  3583. /* Multiply Q in A by left vectors bidiagonalizing R
  3584. (Workspace: need 3*N+M, prefer 3*N+M*NB) */
  3585. i__2 = *lwork - iwork + 1;
  3586. NUMlapack_dormbr ("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &work[itauq], &a[a_offset],
  3587. lda, &work[iwork], &i__2, &ierr);
  3588. /* Generate right vectors bidiagonalizing R in VT
  3589. (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
  3590. i__2 = *lwork - iwork + 1;
  3591. NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
  3592. &ierr);
  3593. iwork = ie + *n;
  3594. /* Perform bidiagonal QR iteration, computing left
  3595. singular vectors of A in A and computing right
  3596. singular vectors of A in VT (Workspace: need BDSPAC) */
  3597. NUMlapack_dbdsqr ("U", n, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt, &a[a_offset],
  3598. lda, dum, &c__1, &work[iwork], info);
  3599. }
  3600. } else if (wntus) {
  3601. if (wntvn) {
  3602. /* Path 4 (M much larger than N, JOBU='S', JOBVT='N') N
  3603. left singular vectors to be computed in U and no right
  3604. singular vectors to be computed
  3605. Computing MAX */
  3606. i__2 = *n << 2;
  3607. if (*lwork >= *n * *n + MAX (i__2, bdspac)) {
  3608. /* Sufficient workspace for a fast algorithm */
  3609. ir = 1;
  3610. if (*lwork >= wrkbl + *lda * *n) {
  3611. /* WORK(IR) is LDA by N */
  3612. ldwrkr = *lda;
  3613. } else {
  3614. /* WORK(IR) is N by N */
  3615. ldwrkr = *n;
  3616. }
  3617. itau = ir + ldwrkr * *n;
  3618. iwork = itau + *n;
  3619. /* Compute A=Q*R (Workspace: need N*N+2*N, prefer
  3620. N*N+N+N*NB) */
  3621. i__2 = *lwork - iwork + 1;
  3622. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3623. /* Copy R to WORK(IR), zeroing out below it */
  3624. NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
  3625. i__2 = *n - 1;
  3626. i__3 = *n - 1;
  3627. NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[ir + 1], &ldwrkr);
  3628. /* Generate Q in A (Workspace: need N*N+2*N, prefer
  3629. N*N+N+N*NB) */
  3630. i__2 = *lwork - iwork + 1;
  3631. NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3632. ie = itau;
  3633. itauq = ie + *n;
  3634. itaup = itauq + *n;
  3635. iwork = itaup + *n;
  3636. /* Bidiagonalize R in WORK(IR) (Workspace: need
  3637. N*N+4*N, prefer N*N+3*N+2*N*NB) */
  3638. i__2 = *lwork - iwork + 1;
  3639. NUMlapack_dgebrd (n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq],
  3640. &work[itaup], &work[iwork], &i__2, &ierr);
  3641. /* Generate left vectors bidiagonalizing R in
  3642. WORK(IR) (Workspace: need N*N+4*N, prefer
  3643. N*N+3*N+N*NB) */
  3644. i__2 = *lwork - iwork + 1;
  3645. NUMlapack_dorgbr ("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__2,
  3646. &ierr);
  3647. iwork = ie + *n;
  3648. /* Perform bidiagonal QR iteration, computing left
  3649. singular vectors of R in WORK(IR) (Workspace: need
  3650. N*N+BDSPAC) */
  3651. NUMlapack_dbdsqr ("U", n, &c__0, n, &c__0, &s[1], &work[ie], dum, &c__1, &work[ir],
  3652. &ldwrkr, dum, &c__1, &work[iwork], info);
  3653. /* Multiply Q in A by left singular vectors of R in
  3654. WORK(IR), storing result in U (Workspace: need
  3655. N*N) */
  3656. NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &a[a_offset], lda, &work[ir], &ldwrkr, &c_b416,
  3657. &u[u_offset], ldu);
  3658. } else {
  3659. /* Insufficient workspace for a fast algorithm */
  3660. itau = 1;
  3661. iwork = itau + *n;
  3662. /* Compute A=Q*R, copying result to U (Workspace:
  3663. need 2*N, prefer N+N*NB) */
  3664. i__2 = *lwork - iwork + 1;
  3665. NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  3666. NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
  3667. /* Generate Q in U (Workspace: need 2*N, prefer
  3668. N+N*NB) */
  3669. i__2 = *lwork - iwork + 1;
  3670. NUMlapack_dorgqr (m, n, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
  3671. ie = itau;
  3672. itauq = ie + *n;
  3673. itaup = itauq + *n;
  3674. iwork = itaup + *n;
  3675. /* Zero out below R in A */
  3676. i__2 = *n - 1;
  3677. i__3 = *n - 1;
  3678. NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &a_ref (2, 1), lda);
/* Bidiagonalize R in A (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply Q in U by left vectors bidiagonalizing R
(Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &a[a_offset], lda, &work[itauq], &u[u_offset],
ldu, &work[iwork], &i__2, &ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, &c__0, m, &c__0, &s[1], &work[ie], dum, &c__1, &u[u_offset],
ldu, dum, &c__1, &work[iwork], info);
}
} else if (wntvo) {
/* Path 5 (M much larger than N, JOBU='S', JOBVT='O') N
left singular vectors to be computed in U and N right
singular vectors to be overwritten on A
Computing MAX */
i__2 = *n << 2;
if (*lwork >= (*n << 1) * *n + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + (*lda << 1) * *n) {
/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
ldwrku = *lda;
ir = iu + ldwrku * *n;
ldwrkr = *lda;
} else if (*lwork >= wrkbl + (*lda + *n) * *n) {
/* WORK(IU) is LDA by N and WORK(IR) is N by N */
ldwrku = *lda;
ir = iu + ldwrku * *n;
ldwrkr = *n;
} else {
/* WORK(IU) is N by N and WORK(IR) is N by N */
ldwrku = *n;
ir = iu + ldwrku * *n;
ldwrkr = *n;
}
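/* Illustrative sketch of the resulting 1-based WORK partition for this
path: two square scratch matrices followed by the Householder scalars,

    iu    = 1;                  // WORK(IU), ldwrku x N: R, then left vectors of R
    ir    = iu + ldwrku * n;    // WORK(IR), ldwrkr x N: right vectors of R
    itau  = ir + ldwrkr * n;    // N elementary-reflector scalars
    iwork = itau + n;           // remainder feeds the blocked routines

the three branches above merely trade the convenient stride LDA against a
tighter N-by-N footprint. */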
itau = ir + ldwrkr * *n;
iwork = itau + *n;
/* Compute A=Q*R (Workspace: need 2*N*N+2*N, prefer
2*N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R to WORK(IU), zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[iu + 1], &ldwrku);
/* Generate Q in A (Workspace: need 2*N*N+2*N, prefer
2*N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in WORK(IU), copying result to
WORK(IR) (Workspace: need 2*N*N+4*N, prefer
2*N*N+3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", n, n, &work[iu], &ldwrku, &work[ir], &ldwrkr);
/* Generate left bidiagonalizing vectors in WORK(IU)
(Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB)
*/
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", n, n, n, &work[iu], &ldwrku, &work[itauq], &work[iwork], &i__2,
&ierr);
/* Generate right bidiagonalizing vectors in WORK(IR)
(Workspace: need 2*N*N+4*N-1, prefer
2*N*N+3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of R in WORK(IU) and computing
right singular vectors of R in WORK(IR)
(Workspace: need 2*N*N+BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, n, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, &work[iu],
&ldwrku, dum, &c__1, &work[iwork], info);
/* Multiply Q in A by left singular vectors of R in
WORK(IU), storing result in U (Workspace: need
N*N) */
NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &a[a_offset], lda, &work[iu], &ldwrku, &c_b416,
&u[u_offset], ldu);
/* Copy right singular vectors of R to A (Workspace:
need N*N) */
NUMlapack_dlacpy ("F", n, n, &work[ir], &ldwrkr, &a[a_offset], lda);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N, prefer N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need 2*N, prefer
N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, n, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Zero out below R in A */
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &a_ref (2, 1), lda);
/* Bidiagonalize R in A (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply Q in U by left vectors bidiagonalizing R
(Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &a[a_offset], lda, &work[itauq], &u[u_offset],
ldu, &work[iwork], &i__2, &ierr);
/* Generate right vectors bidiagonalizing R in A
(Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in A (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, m, &c__0, &s[1], &work[ie], &a[a_offset], lda,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
}
} else if (wntvas) {
/* Path 6 (M much larger than N, JOBU='S', JOBVT='S' or
'A') N left singular vectors to be computed in U and N
right singular vectors to be computed in VT
Computing MAX */
i__2 = *n << 2;
if (*lwork >= *n * *n + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + *lda * *n) {
/* WORK(IU) is LDA by N */
ldwrku = *lda;
} else {
/* WORK(IU) is N by N */
ldwrku = *n;
}
itau = iu + ldwrku * *n;
iwork = itau + *n;
/* Compute A=Q*R (Workspace: need N*N+2*N, prefer
N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R to WORK(IU), zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[iu + 1], &ldwrku);
/* Generate Q in A (Workspace: need N*N+2*N, prefer
N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, n, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in WORK(IU), copying result to VT
(Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
/* Generate left bidiagonalizing vectors in WORK(IU)
(Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", n, n, n, &work[iu], &ldwrku, &work[itauq], &work[iwork], &i__2,
&ierr);
/* Generate right bidiagonalizing vectors in VT
(Workspace: need N*N+4*N-1, prefer
N*N+3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of R in WORK(IU) and computing
right singular vectors of R in VT (Workspace: need
N*N+BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, n, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&work[iu], &ldwrku, dum, &c__1, &work[iwork], info);
/* Multiply Q in A by left singular vectors of R in
WORK(IU), storing result in U (Workspace: need
N*N) */
NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &a[a_offset], lda, &work[iu], &ldwrku, &c_b416,
&u[u_offset], ldu);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N, prefer N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need 2*N, prefer
N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, n, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R to VT, zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &vt_ref (2, 1), ldvt);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in VT (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
/* Multiply Q in U by left bidiagonalizing vectors in
VT (Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &work[itauq],
&u[u_offset], ldu, &work[iwork], &i__2, &ierr);
/* Generate right bidiagonalizing vectors in VT
(Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in VT (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
}
}
} else if (wntua) {
if (wntvn) {
/* Path 7 (M much larger than N, JOBU='A', JOBVT='N') M
left singular vectors to be computed in U and no right
singular vectors to be computed
Computing MAX */
i__2 = *n + *m, i__3 = *n << 2, i__2 = MAX (i__2, i__3);
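/* Illustrative note: the comma expression above is f2c shorthand for a
running maximum; written out,

    i__2 = *n + *m;              // N+M, for generating the M-by-M Q
    i__3 = *n << 2;              // 4*N, for the bidiagonalization
    i__2 = MAX (i__2, i__3);
    // the test below is then: lwork >= N*N + max(N+M, 4*N, BDSPAC)
*/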
if (*lwork >= *n * *n + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
ir = 1;
if (*lwork >= wrkbl + *lda * *n) {
/* WORK(IR) is LDA by N */
ldwrkr = *lda;
} else {
/* WORK(IR) is N by N */
ldwrkr = *n;
}
itau = ir + ldwrkr * *n;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need N*N+2*N, prefer N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Copy R to WORK(IR), zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[ir + 1], &ldwrkr);
/* Generate Q in U (Workspace: need N*N+N+M, prefer
N*N+N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in WORK(IR) (Workspace: need
N*N+4*N, prefer N*N+3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
/* Generate left bidiagonalizing vectors in WORK(IR)
(Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", n, n, n, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of R in WORK(IR) (Workspace: need
N*N+BDSPAC) */
NUMlapack_dbdsqr ("U", n, &c__0, n, &c__0, &s[1], &work[ie], dum, &c__1, &work[ir],
&ldwrkr, dum, &c__1, &work[iwork], info);
/* Multiply Q in U by left singular vectors of R in
WORK(IR), storing result in A (Workspace: need
N*N) */
NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &u[u_offset], ldu, &work[ir], &ldwrkr, &c_b416,
&a[a_offset], lda);
/* Copy left singular vectors of A from A to U */
NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N, prefer N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need N+M, prefer
N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Zero out below R in A */
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &a_ref (2, 1), lda);
/* Bidiagonalize R in A (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply Q in U by left bidiagonalizing vectors in
A (Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &a[a_offset], lda, &work[itauq], &u[u_offset],
ldu, &work[iwork], &i__2, &ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, &c__0, m, &c__0, &s[1], &work[ie], dum, &c__1, &u[u_offset],
ldu, dum, &c__1, &work[iwork], info);
}
} else if (wntvo) {
/* Path 8 (M much larger than N, JOBU='A', JOBVT='O') M
left singular vectors to be computed in U and N right
singular vectors to be overwritten on A
Computing MAX */
i__2 = *n + *m, i__3 = *n << 2, i__2 = MAX (i__2, i__3);
if (*lwork >= (*n << 1) * *n + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + (*lda << 1) * *n) {
/* WORK(IU) is LDA by N and WORK(IR) is LDA by N */
ldwrku = *lda;
ir = iu + ldwrku * *n;
ldwrkr = *lda;
} else if (*lwork >= wrkbl + (*lda + *n) * *n) {
/* WORK(IU) is LDA by N and WORK(IR) is N by N */
ldwrku = *lda;
ir = iu + ldwrku * *n;
ldwrkr = *n;
} else {
/* WORK(IU) is N by N and WORK(IR) is N by N */
ldwrku = *n;
ir = iu + ldwrku * *n;
ldwrkr = *n;
}
itau = ir + ldwrkr * *n;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N*N+2*N, prefer 2*N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need 2*N*N+N+M, prefer
2*N*N+N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R to WORK(IU), zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[iu + 1], &ldwrku);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in WORK(IU), copying result to
WORK(IR) (Workspace: need 2*N*N+4*N, prefer
2*N*N+3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", n, n, &work[iu], &ldwrku, &work[ir], &ldwrkr);
/* Generate left bidiagonalizing vectors in WORK(IU)
(Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB)
*/
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", n, n, n, &work[iu], &ldwrku, &work[itauq], &work[iwork], &i__2,
&ierr);
/* Generate right bidiagonalizing vectors in WORK(IR)
(Workspace: need 2*N*N+4*N-1, prefer
2*N*N+3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of R in WORK(IU) and computing
right singular vectors of R in WORK(IR)
(Workspace: need 2*N*N+BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, n, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, &work[iu],
&ldwrku, dum, &c__1, &work[iwork], info);
/* Multiply Q in U by left singular vectors of R in
WORK(IU), storing result in A (Workspace: need
N*N) */
NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &u[u_offset], ldu, &work[iu], &ldwrku, &c_b416,
&a[a_offset], lda);
/* Copy left singular vectors of A from A to U */
NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Copy right singular vectors of R from WORK(IR) to
A */
NUMlapack_dlacpy ("F", n, n, &work[ir], &ldwrkr, &a[a_offset], lda);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N, prefer N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need N+M, prefer
N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Zero out below R in A */
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &a_ref (2, 1), lda);
/* Bidiagonalize R in A (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply Q in U by left bidiagonalizing vectors in
A (Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &a[a_offset], lda, &work[itauq], &u[u_offset],
ldu, &work[iwork], &i__2, &ierr);
/* Generate right bidiagonalizing vectors in A
(Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in A (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, m, &c__0, &s[1], &work[ie], &a[a_offset], lda,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
}
} else if (wntvas) {
/* Path 9 (M much larger than N, JOBU='A', JOBVT='S' or
'A') M left singular vectors to be computed in U and N
right singular vectors to be computed in VT
Computing MAX */
i__2 = *n + *m, i__3 = *n << 2, i__2 = MAX (i__2, i__3);
if (*lwork >= *n * *n + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + *lda * *n) {
/* WORK(IU) is LDA by N */
ldwrku = *lda;
} else {
/* WORK(IU) is N by N */
ldwrku = *n;
}
itau = iu + ldwrku * *n;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need N*N+2*N, prefer N*N+N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need N*N+N+M, prefer
N*N+N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R to WORK(IU), zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &work[iu + 1], &ldwrku);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in WORK(IU), copying result to VT
(Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
/* Generate left bidiagonalizing vectors in WORK(IU)
(Workspace: need N*N+4*N, prefer N*N+3*N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", n, n, n, &work[iu], &ldwrku, &work[itauq], &work[iwork], &i__2,
&ierr);
/* Generate right bidiagonalizing vectors in VT
(Workspace: need N*N+4*N-1, prefer
N*N+3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of R in WORK(IU) and computing
right singular vectors of R in VT (Workspace: need
N*N+BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, n, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&work[iu], &ldwrku, dum, &c__1, &work[iwork], info);
/* Multiply Q in U by left singular vectors of R in
WORK(IU), storing result in A (Workspace: need
N*N) */
NUMblas_dgemm ("N", "N", m, n, n, &c_b438, &u[u_offset], ldu, &work[iu], &ldwrku, &c_b416,
&a[a_offset], lda);
/* Copy left singular vectors of A from A to U */
NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *n;
/* Compute A=Q*R, copying result to U (Workspace:
need 2*N, prefer N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgeqrf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
/* Generate Q in U (Workspace: need N+M, prefer
N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgqr (m, m, n, &u[u_offset], ldu, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy R from A to VT, zeroing out below it */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
i__2 = *n - 1;
i__3 = *n - 1;
NUMlapack_dlaset ("L", &i__2, &i__3, &c_b416, &c_b416, &vt_ref (2, 1), ldvt);
ie = itau;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
/* Bidiagonalize R in VT (Workspace: need 4*N, prefer
3*N+2*N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (n, n, &vt[vt_offset], ldvt, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
/* Multiply Q in U by left bidiagonalizing vectors in
VT (Workspace: need 3*N+M, prefer 3*N+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("Q", "R", "N", m, n, n, &vt[vt_offset], ldvt, &work[itauq],
&u[u_offset], ldu, &work[iwork], &i__2, &ierr);
/* Generate right bidiagonalizing vectors in VT
(Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *n;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in VT (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", n, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
}
}
}
} else {
/* M .LT. MNTHR
Path 10 (M at least N, but not much larger) Reduce to
bidiagonal form without QR decomposition */
ie = 1;
itauq = ie + *n;
itaup = itauq + *n;
iwork = itaup + *n;
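/* Illustrative sketch of the 1-based WORK layout used from here on:

    ie    = 1;              // superdiagonal of the bidiagonal form (n-1 used)
    itauq = ie + n;         // n scalars of the left Householder reflectors
    itaup = itauq + n;      // n scalars of the right Householder reflectors
    iwork = itaup + n;      // blocked workspace for dgebrd and friends

these three n-vectors are the "3*N" that recurs in the workspace notes. */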
/* Bidiagonalize A (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
if (wntuas) {
/* If left singular vectors desired in U, copy result to U
and generate left bidiagonalizing vectors in U (Workspace:
need 3*N+NCU, prefer 3*N+NCU*NB) */
NUMlapack_dlacpy ("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
if (wntus) {
ncu = *n;
}
if (wntua) {
ncu = *m;
}
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, &ncu, n, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2, &ierr);
}
if (wntvas) {
/* If right singular vectors desired in VT, copy result to VT
and generate right bidiagonalizing vectors in VT
(Workspace: need 4*N-1, prefer 3*N+(N-1)*NB) */
NUMlapack_dlacpy ("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2, &ierr);
}
if (wntuo) {
/* If left singular vectors desired in A, generate left
bidiagonalizing vectors in A (Workspace: need 4*N, prefer
3*N+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[iwork], &i__2, &ierr);
}
if (wntvo) {
/* If right singular vectors desired in A, generate right
bidiagonalizing vectors in A (Workspace: need 4*N-1,
prefer 3*N+(N-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", n, n, n, &a[a_offset], lda, &work[itaup], &work[iwork], &i__2, &ierr);
}
iwork = ie + *n;
if (wntuas || wntuo) {
nru = *m;
}
if (wntun) {
nru = 0;
}
if (wntvas || wntvo) {
ncvt = *n;
}
if (wntvn) {
ncvt = 0;
}
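/* Illustrative note: nru and ncvt switch the individual dbdsqr outputs on
or off, so one call shape covers every JOBU/JOBVT combination here:

    long nru  = (wntuas || wntuo) ? m : 0;   // rows of U to update
    long ncvt = (wntvas || wntvo) ? n : 0;   // columns of VT to update
    // dbdsqr only touches an array whose corresponding dimension is nonzero
*/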
if (!wntuo && !wntvo) {
/* Perform bidiagonal QR iteration, if desired, computing
left singular vectors in U and computing right singular
vectors in VT (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
} else if (!wntuo && wntvo) {
/* Perform bidiagonal QR iteration, if desired, computing
left singular vectors in U and computing right singular
vectors in A (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[a_offset], lda, &u[u_offset],
ldu, dum, &c__1, &work[iwork], info);
} else {
/* Perform bidiagonal QR iteration, if desired, computing
left singular vectors in A and computing right singular
vectors in VT (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("U", n, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&a[a_offset], lda, dum, &c__1, &work[iwork], info);
}
}
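/* Illustrative usage sketch: a hypothetical call of this routine for a tall
matrix, assuming the conventional f2c-style gesvd interface in which every
scalar is passed by pointer (check the actual NUMlapack_dgesvd prototype
earlier in this file before copying this):

    long m = 100, n = 10, lda = m, ldu = m, ldvt = n, info;
    long lwork = MAX (3 * n + m, 5 * n);   // documented minimum for M >= N
    // caller allocates a (lda*n), s (n), u (ldu*m), vt (ldvt*n), work (lwork)
    NUMlapack_dgesvd ("A", "A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
        work, &lwork, &info);
*/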
} else {
/* A has more columns than rows. If A has sufficiently more columns
than rows, first reduce using the LQ decomposition (if sufficient
workspace available) */
if (*n >= mnthr) {
if (wntvn) {
/* Path 1t(N much larger than M, JOBVT='N') No right singular
vectors to be computed */
itau = 1;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
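/* Illustrative sketch: for N > M the roles of Q and R are mirrored: dgelqf
factors A = L*Q with L lower triangular M-by-M, and all further work
happens on L. A hedged dispatch outline (mnthr is the crossover computed
earlier in this routine, outside this excerpt):

    if (m >= n)            // tall: A = Q*R, bidiagonalize the N-by-N R
    else if (n >= mnthr)   // wide: A = L*Q, bidiagonalize the M-by-M L
    else                   // near-square: bidiagonalize A directly (Path 10t)
*/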
/* Zero out above L */
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &a_ref (1, 2), lda);
ie = 1;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in A (Workspace: need 4*M, prefer
3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
if (wntuo || wntuas) {
/* If left singular vectors desired, generate Q
(Workspace: need 4*M, prefer 3*M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &a[a_offset], lda, &work[itauq], &work[iwork], &i__2,
&ierr);
}
iwork = ie + *m;
nru = 0;
if (wntuo || wntuas) {
nru = *m;
}
/* Perform bidiagonal QR iteration, computing left singular
vectors of A in A if desired (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("U", m, &c__0, &nru, &c__0, &s[1], &work[ie], dum, &c__1, &a[a_offset], lda,
dum, &c__1, &work[iwork], info);
/* If left singular vectors desired in U, copy them there */
if (wntuas) {
NUMlapack_dlacpy ("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
}
} else if (wntvo && wntun) {
/* Path 2t(N much larger than M, JOBU='N', JOBVT='O') M right
singular vectors to be overwritten on A and no left
singular vectors to be computed
Computing MAX */
i__2 = *m << 2;
if (*lwork >= *m * *m + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
ir = 1;
/* Computing MAX */
i__2 = wrkbl, i__3 = *lda * *n + *m;
if (*lwork >= MAX (i__2, i__3) + *lda * *m) {
/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
ldwrku = *lda;
chunk = *n;
ldwrkr = *lda;
} else { /* if(complicated condition) */
/* Computing MAX */
i__2 = wrkbl, i__3 = *lda * *n + *m;
if (*lwork >= MAX (i__2, i__3) + *m * *m) {
/* WORK(IU) is LDA by N and WORK(IR) is M by M */
ldwrku = *lda;
chunk = *n;
ldwrkr = *m;
} else {
/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
ldwrku = *m;
chunk = (*lwork - *m * *m - *m) / *m;
ldwrkr = *m;
}
}
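/* Illustrative note: when WORK cannot hold the full M-by-N product, the Q
update is tiled over column blocks, and the block width is whatever is
left after the M-by-M scratch matrix and the M tau scalars:

    chunk = (lwork - m * m - m) / m;   // columns per dgemm tile
    // each tile is staged in an M-by-chunk buffer WORK(IU)
*/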
itau = ir + ldwrkr * *m;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy L to WORK(IR) and zero out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[ir + ldwrkr], &ldwrkr);
/* Generate Q in A (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in WORK(IR) (Workspace: need M*M+4*M,
prefer M*M+3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Generate right vectors bidiagonalizing L (Workspace:
need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing right
singular vectors of L in WORK(IR) (Workspace: need
M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, &c__0, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, dum, &c__1,
dum, &c__1, &work[iwork], info);
iu = ie + *m;
/* Multiply right singular vectors of L in WORK(IR) by Q
in A, storing result in WORK(IU) and copying to A
(Workspace: need M*M+2*M, prefer M*M+M*N+M) */
i__2 = *n;
i__3 = chunk;
for (i__ = 1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) {
/* Computing MIN */
i__4 = *n - i__ + 1;
blk = MIN (i__4, chunk);
NUMblas_dgemm ("N", "N", m, &blk, m, &c_b438, &work[ir], &ldwrkr, &a_ref (1, i__), lda,
&c_b416, &work[iu], &ldwrku);
NUMlapack_dlacpy ("F", m, &blk, &work[iu], &ldwrku, &a_ref (1, i__), lda);
/* L30: */
}
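/* Illustrative sketch of the tiled update above in 0-based form: the M-by-M
right-vector matrix in WORK(IR) multiplies Q (stored in A) one block of
columns at a time, staged through WORK(IU) because dgemm must not write
over a matrix it is still reading:

    for (long i = 0; i < n; i += chunk) {
        long blk = MIN (n - i, chunk);
        // work_iu (m x blk) = work_ir (m x m) times A columns i .. i+blk-1
        // then dlacpy copies work_iu back over those columns of A
    }
*/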
} else {
/* Insufficient workspace for a fast algorithm */
ie = 1;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize A (Workspace: need 3*M+N, prefer
3*M+(M+N)*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__3, &ierr);
/* Generate right vectors bidiagonalizing A (Workspace:
need 4*M, prefer 3*M+M*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[iwork], &i__3,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing right
singular vectors of A in A (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("L", m, n, &c__0, &c__0, &s[1], &work[ie], &a[a_offset], lda, dum, &c__1,
dum, &c__1, &work[iwork], info);
}
} else if (wntvo && wntuas) {
/* Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O')
M right singular vectors to be overwritten on A and M left
singular vectors to be computed in U
Computing MAX */
i__3 = *m << 2;
if (*lwork >= *m * *m + MAX (i__3, bdspac)) {
/* Sufficient workspace for a fast algorithm */
ir = 1;
/* Computing MAX */
i__3 = wrkbl, i__2 = *lda * *n + *m;
if (*lwork >= MAX (i__3, i__2) + *lda * *m) {
/* WORK(IU) is LDA by N and WORK(IR) is LDA by M */
ldwrku = *lda;
chunk = *n;
ldwrkr = *lda;
} else { /* if(complicated condition) */
/* Computing MAX */
i__3 = wrkbl, i__2 = *lda * *n + *m;
if (*lwork >= MAX (i__3, i__2) + *m * *m) {
/* WORK(IU) is LDA by N and WORK(IR) is M by M */
ldwrku = *lda;
chunk = *n;
ldwrkr = *m;
} else {
/* WORK(IU) is M by CHUNK and WORK(IR) is M by M */
ldwrku = *m;
chunk = (*lwork - *m * *m - *m) / *m;
ldwrkr = *m;
}
}
itau = ir + ldwrkr * *m;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__3, &ierr);
/* Copy L to U, zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
i__3 = *m - 1;
i__2 = *m - 1;
NUMlapack_dlaset ("U", &i__3, &i__2, &c_b416, &c_b416, &u_ref (1, 2), ldu);
/* Generate Q in A (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__3, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in U, copying result to WORK(IR)
(Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__3, &ierr);
NUMlapack_dlacpy ("U", m, m, &u[u_offset], ldu, &work[ir], &ldwrkr);
/* Generate right vectors bidiagonalizing L in WORK(IR)
(Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__3,
&ierr);
/* Generate left vectors bidiagonalizing L in U
(Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
i__3 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__3,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of L in U, and computing right
singular vectors of L in WORK(IR) (Workspace: need
M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, m, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, &u[u_offset],
ldu, dum, &c__1, &work[iwork], info);
iu = ie + *m;
/* Multiply right singular vectors of L in WORK(IR) by Q
in A, storing result in WORK(IU) and copying to A
(Workspace: need M*M+2*M, prefer M*M+M*N+M) */
i__3 = *n;
i__2 = chunk;
for (i__ = 1; i__2 < 0 ? i__ >= i__3 : i__ <= i__3; i__ += i__2) {
/* Computing MIN */
i__4 = *n - i__ + 1;
blk = MIN (i__4, chunk);
NUMblas_dgemm ("N", "N", m, &blk, m, &c_b438, &work[ir], &ldwrkr, &a_ref (1, i__), lda,
&c_b416, &work[iu], &ldwrku);
NUMlapack_dlacpy ("F", m, &blk, &work[iu], &ldwrku, &a_ref (1, i__), lda);
/* L40: */
}
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy L to U, zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &u_ref (1, 2), ldu);
/* Generate Q in A (Workspace: need 2*M, prefer M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in U (Workspace: need 4*M, prefer
3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply right vectors bidiagonalizing L by Q in A
(Workspace: need 3*M+N, prefer 3*M+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("P", "L", "T", m, n, m, &u[u_offset], ldu, &work[itaup], &a[a_offset],
lda, &work[iwork], &i__2, &ierr);
/* Generate left vectors bidiagonalizing L in U
(Workspace: need 4*M, prefer 3*M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in A (Workspace: need BDSPAC) */
NUMlapack_dbdsqr ("U", m, n, m, &c__0, &s[1], &work[ie], &a[a_offset], lda, &u[u_offset],
ldu, dum, &c__1, &work[iwork], info);
}
} else if (wntvs) {
if (wntun) {
/* Path 4t(N much larger than M, JOBU='N', JOBVT='S') M
right singular vectors to be computed in VT and no
left singular vectors to be computed
Computing MAX */
i__2 = *m << 2;
if (*lwork >= *m * *m + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
ir = 1;
if (*lwork >= wrkbl + *lda * *m) {
/* WORK(IR) is LDA by M */
ldwrkr = *lda;
} else {
/* WORK(IR) is M by M */
ldwrkr = *m;
}
itau = ir + ldwrkr * *m;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy L to WORK(IR), zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[ir + ldwrkr], &ldwrkr);
/* Generate Q in A (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in WORK(IR) (Workspace: need
M*M+4*M, prefer M*M+3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
/* Generate right vectors bidiagonalizing L in
WORK(IR) (Workspace: need M*M+4*M-1, prefer
M*M+3*M+(M-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing right
singular vectors of L in WORK(IR) (Workspace: need
M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, &c__0, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, dum,
&c__1, dum, &c__1, &work[iwork], info);
/* Multiply right singular vectors of L in WORK(IR)
by Q in A, storing result in VT (Workspace: need
M*M) */
NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[ir], &ldwrkr, &a[a_offset], lda, &c_b416,
&vt[vt_offset], ldvt);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB)
*/
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy result to VT */
NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
/* Generate Q in VT (Workspace: need 2*M, prefer
M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
&ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Zero out above L in A */
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &a_ref (1, 2), lda);
/* Bidiagonalize L in A (Workspace: need 4*M, prefer
3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply right vectors bidiagonalizing L by Q in
VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("P", "L", "T", m, n, m, &a[a_offset], lda, &work[itaup],
&vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing right
singular vectors of A in VT (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", m, n, &c__0, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt, dum,
&c__1, dum, &c__1, &work[iwork], info);
}
} else if (wntuo) {
/* Path 5t(N much larger than M, JOBU='O', JOBVT='S') M
right singular vectors to be computed in VT and M left
singular vectors to be overwritten on A
Computing MAX */
i__2 = *m << 2;
if (*lwork >= (*m << 1) * *m + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + (*lda << 1) * *m) {
/* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
ldwrku = *lda;
ir = iu + ldwrku * *m;
ldwrkr = *lda;
} else if (*lwork >= wrkbl + (*lda + *m) * *m) {
/* WORK(IU) is LDA by M and WORK(IR) is M by M */
ldwrku = *lda;
ir = iu + ldwrku * *m;
ldwrkr = *m;
} else {
/* WORK(IU) is M by M and WORK(IR) is M by M */
ldwrku = *m;
ir = iu + ldwrku * *m;
ldwrkr = *m;
}
itau = ir + ldwrkr * *m;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need 2*M*M+2*M, prefer
2*M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy L to WORK(IU), zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[iu + ldwrku], &ldwrku);
/* Generate Q in A (Workspace: need 2*M*M+2*M, prefer
2*M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in WORK(IU), copying result to
WORK(IR) (Workspace: need 2*M*M+4*M, prefer
2*M*M+3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, m, &work[iu], &ldwrku, &work[ir], &ldwrkr);
/* Generate right bidiagonalizing vectors in WORK(IU)
(Workspace: need 2*M*M+4*M-1, prefer
2*M*M+3*M+(M-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[iu], &ldwrku, &work[itaup], &work[iwork], &i__2,
&ierr);
/* Generate left bidiagonalizing vectors in WORK(IR)
(Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB)
*/
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of L in WORK(IR) and computing
right singular vectors of L in WORK(IU)
(Workspace: need 2*M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, m, &c__0, &s[1], &work[ie], &work[iu], &ldwrku, &work[ir],
&ldwrkr, dum, &c__1, &work[iwork], info);
/* Multiply right singular vectors of L in WORK(IU)
by Q in A, storing result in VT (Workspace: need
M*M) */
NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[iu], &ldwrku, &a[a_offset], lda, &c_b416,
&vt[vt_offset], ldvt);
/* Copy left singular vectors of L to A (Workspace:
need M*M) */
NUMlapack_dlacpy ("F", m, m, &work[ir], &ldwrkr, &a[a_offset], lda);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *m;
/* Compute A=L*Q, copying result to VT (Workspace:
need 2*M, prefer M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
/* Generate Q in VT (Workspace: need 2*M, prefer
M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
&ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Zero out above L in A */
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &a_ref (1, 2), lda);
/* Bidiagonalize L in A (Workspace: need 4*M, prefer
3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply right vectors bidiagonalizing L by Q in
VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("P", "L", "T", m, n, m, &a[a_offset], lda, &work[itaup],
&vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
/* Generate left bidiagonalizing vectors of L in A
(Workspace: need 4*M, prefer 3*M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &a[a_offset], lda, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in A and computing right
singular vectors of A in VT (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", m, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&a[a_offset], lda, dum, &c__1, &work[iwork], info);
}
} else if (wntuas) {
/* Path 6t(N much larger than M, JOBU='S' or 'A',
JOBVT='S') M right singular vectors to be computed in
VT and M left singular vectors to be computed in U
Computing MAX */
i__2 = *m << 2;
if (*lwork >= *m * *m + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
iu = 1;
if (*lwork >= wrkbl + *lda * *m) {
/* WORK(IU) is LDA by M */
ldwrku = *lda;
} else {
/* WORK(IU) is M by M */
ldwrku = *m;
}
itau = iu + ldwrku * *m;
iwork = itau + *m;
/* Compute A=L*Q (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
/* Copy L to WORK(IU), zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[iu], &ldwrku);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[iu + ldwrku], &ldwrku);
/* Generate Q in A (Workspace: need M*M+2*M, prefer
M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in WORK(IU), copying result to U
(Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("L", m, m, &work[iu], &ldwrku, &u[u_offset], ldu);
/* Generate right bidiagonalizing vectors in WORK(IU)
(Workspace: need M*M+4*M-1, prefer
M*M+3*M+(M-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[iu], &ldwrku, &work[itaup], &work[iwork], &i__2,
&ierr);
/* Generate left bidiagonalizing vectors in U
(Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of L in U and computing right
singular vectors of L in WORK(IU) (Workspace: need
M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, m, &c__0, &s[1], &work[ie], &work[iu], &ldwrku,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
/* Multiply right singular vectors of L in WORK(IU)
by Q in A, storing result in VT (Workspace: need
M*M) */
NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[iu], &ldwrku, &a[a_offset], lda, &c_b416,
&vt[vt_offset], ldvt);
} else {
/* Insufficient workspace for a fast algorithm */
itau = 1;
iwork = itau + *m;
/* Compute A=L*Q, copying result to VT (Workspace:
need 2*M, prefer M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
/* Generate Q in VT (Workspace: need 2*M, prefer
M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (m, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
&ierr);
/* Copy L to U, zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &u_ref (1, 2), ldu);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in U (Workspace: need 4*M, prefer
3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[itauq], &work[itaup],
&work[iwork], &i__2, &ierr);
/* Multiply right bidiagonalizing vectors in U by Q
in VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dormbr ("P", "L", "T", m, n, m, &u[u_offset], ldu, &work[itaup],
&vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
/* Generate left bidiagonalizing vectors in U
(Workspace: need 4*M, prefer 3*M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing left
singular vectors of A in U and computing right
singular vectors of A in VT (Workspace: need
BDSPAC) */
NUMlapack_dbdsqr ("U", m, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
&u[u_offset], ldu, dum, &c__1, &work[iwork], info);
}
}
} else if (wntva) {
if (wntun) {
/* Path 7t(N much larger than M, JOBU='N', JOBVT='A') N
right singular vectors to be computed in VT and no
left singular vectors to be computed
Computing MAX */
i__2 = *n + *m, i__3 = *m << 2, i__2 = MAX (i__2, i__3);
if (*lwork >= *m * *m + MAX (i__2, bdspac)) {
/* Sufficient workspace for a fast algorithm */
ir = 1;
if (*lwork >= wrkbl + *lda * *m) {
/* WORK(IR) is LDA by M */
ldwrkr = *lda;
} else {
/* WORK(IR) is M by M */
ldwrkr = *m;
}
itau = ir + ldwrkr * *m;
iwork = itau + *m;
/* Compute A=L*Q, copying result to VT (Workspace:
need M*M+2*M, prefer M*M+M+M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
/* Copy L to WORK(IR), zeroing out above it */
NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[ir], &ldwrkr);
i__2 = *m - 1;
i__3 = *m - 1;
NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[ir + ldwrkr], &ldwrkr);
/* Generate Q in VT (Workspace: need M*M+M+N, prefer
M*M+M+N*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
&ierr);
ie = itau;
itauq = ie + *m;
itaup = itauq + *m;
iwork = itaup + *m;
/* Bidiagonalize L in WORK(IR) (Workspace: need
M*M+4*M, prefer M*M+3*M+2*M*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dgebrd (m, m, &work[ir], &ldwrkr, &s[1], &work[ie], &work[itauq],
&work[itaup], &work[iwork], &i__2, &ierr);
/* Generate right bidiagonalizing vectors in WORK(IR)
(Workspace: need M*M+4*M-1, prefer
M*M+3*M+(M-1)*NB) */
i__2 = *lwork - iwork + 1;
NUMlapack_dorgbr ("P", m, m, m, &work[ir], &ldwrkr, &work[itaup], &work[iwork], &i__2,
&ierr);
iwork = ie + *m;
/* Perform bidiagonal QR iteration, computing right
singular vectors of L in WORK(IR) (Workspace: need
M*M+BDSPAC) */
NUMlapack_dbdsqr ("U", m, m, &c__0, &c__0, &s[1], &work[ie], &work[ir], &ldwrkr, dum,
  5010. &c__1, dum, &c__1, &work[iwork], info);
  5011. /* Multiply right singular vectors of L in WORK(IR)
  5012. by Q in VT, storing result in A (Workspace: need
  5013. M*M) */
  5014. NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[ir], &ldwrkr, &vt[vt_offset], ldvt, &c_b416,
  5015. &a[a_offset], lda);
  5016. /* Copy right singular vectors of A from A to VT */
  5017. NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5018. } else {
  5019. /* Insufficient workspace for a fast algorithm */
  5020. itau = 1;
  5021. iwork = itau + *m;
  5022. /* Compute A=L*Q, copying result to VT (Workspace:
  5023. need 2*M, prefer M+M*NB) */
  5024. i__2 = *lwork - iwork + 1;
  5025. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  5026. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5027. /* Generate Q in VT (Workspace: need M+N, prefer
  5028. M+N*NB) */
  5029. i__2 = *lwork - iwork + 1;
  5030. NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
  5031. &ierr);
  5032. ie = itau;
  5033. itauq = ie + *m;
  5034. itaup = itauq + *m;
  5035. iwork = itaup + *m;
  5036. /* Zero out above L in A */
  5037. i__2 = *m - 1;
  5038. i__3 = *m - 1;
  5039. NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &a_ref (1, 2), lda);
  5040. /* Bidiagonalize L in A (Workspace: need 4*M, prefer
  5041. 3*M+2*M*NB) */
  5042. i__2 = *lwork - iwork + 1;
  5043. NUMlapack_dgebrd (m, m, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
  5044. &work[iwork], &i__2, &ierr);
  5045. /* Multiply right bidiagonalizing vectors in A by Q
  5046. in VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
  5047. i__2 = *lwork - iwork + 1;
  5048. NUMlapack_dormbr ("P", "L", "T", m, n, m, &a[a_offset], lda, &work[itaup],
  5049. &vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
  5050. iwork = ie + *m;
  5051. /* Perform bidiagonal QR iteration, computing right
  5052. singular vectors of A in VT (Workspace: need
  5053. BDSPAC) */
  5054. NUMlapack_dbdsqr ("U", m, n, &c__0, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt, dum,
  5055. &c__1, dum, &c__1, &work[iwork], info);
  5056. }
  5057. } else if (wntuo) {
  5058. /* Path 8t(N much larger than M, JOBU='O', JOBVT='A') N
  5059. right singular vectors to be computed in VT and M left
  5060. singular vectors to be overwritten on A
  5061. Computing MAX */
  5062. i__2 = *n + *m, i__3 = *m << 2, i__2 = MAX (i__2, i__3);
  5063. if (*lwork >= (*m << 1) * *m + MAX (i__2, bdspac)) {
  5064. /* Sufficient workspace for a fast algorithm */
  5065. iu = 1;
  5066. if (*lwork >= wrkbl + (*lda << 1) * *m) {
  5067. /* WORK(IU) is LDA by M and WORK(IR) is LDA by M */
  5068. ldwrku = *lda;
  5069. ir = iu + ldwrku * *m;
  5070. ldwrkr = *lda;
  5071. } else if (*lwork >= wrkbl + (*lda + *m) * *m) {
  5072. /* WORK(IU) is LDA by M and WORK(IR) is M by M */
  5073. ldwrku = *lda;
  5074. ir = iu + ldwrku * *m;
  5075. ldwrkr = *m;
  5076. } else {
  5077. /* WORK(IU) is M by M and WORK(IR) is M by M */
  5078. ldwrku = *m;
  5079. ir = iu + ldwrku * *m;
  5080. ldwrkr = *m;
  5081. }
  5082. itau = ir + ldwrkr * *m;
  5083. iwork = itau + *m;
  5084. /* Compute A=L*Q, copying result to VT (Workspace:
  5085. need 2*M*M+2*M, prefer 2*M*M+M+M*NB) */
  5086. i__2 = *lwork - iwork + 1;
  5087. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  5088. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5089. /* Generate Q in VT (Workspace: need 2*M*M+M+N,
  5090. prefer 2*M*M+M+N*NB) */
  5091. i__2 = *lwork - iwork + 1;
  5092. NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
  5093. &ierr);
  5094. /* Copy L to WORK(IU), zeroing out above it */
  5095. NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[iu], &ldwrku);
  5096. i__2 = *m - 1;
  5097. i__3 = *m - 1;
  5098. NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[iu + ldwrku], &ldwrku);
  5099. ie = itau;
  5100. itauq = ie + *m;
  5101. itaup = itauq + *m;
  5102. iwork = itaup + *m;
  5103. /* Bidiagonalize L in WORK(IU), copying result to
  5104. WORK(IR) (Workspace: need 2*M*M+4*M, prefer
  5105. 2*M*M+3*M+2*M*NB) */
  5106. i__2 = *lwork - iwork + 1;
  5107. NUMlapack_dgebrd (m, m, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
  5108. &work[itaup], &work[iwork], &i__2, &ierr);
  5109. NUMlapack_dlacpy ("L", m, m, &work[iu], &ldwrku, &work[ir], &ldwrkr);
  5110. /* Generate right bidiagonalizing vectors in WORK(IU)
  5111. (Workspace: need 2*M*M+4*M-1, prefer
  5112. 2*M*M+3*M+(M-1)*NB) */
  5113. i__2 = *lwork - iwork + 1;
  5114. NUMlapack_dorgbr ("P", m, m, m, &work[iu], &ldwrku, &work[itaup], &work[iwork], &i__2,
  5115. &ierr);
  5116. /* Generate left bidiagonalizing vectors in WORK(IR)
  5117. (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB)
  5118. */
  5119. i__2 = *lwork - iwork + 1;
  5120. NUMlapack_dorgbr ("Q", m, m, m, &work[ir], &ldwrkr, &work[itauq], &work[iwork], &i__2,
  5121. &ierr);
  5122. iwork = ie + *m;
  5123. /* Perform bidiagonal QR iteration, computing left
  5124. singular vectors of L in WORK(IR) and computing
  5125. right singular vectors of L in WORK(IU)
  5126. (Workspace: need 2*M*M+BDSPAC) */
  5127. NUMlapack_dbdsqr ("U", m, m, m, &c__0, &s[1], &work[ie], &work[iu], &ldwrku, &work[ir],
  5128. &ldwrkr, dum, &c__1, &work[iwork], info);
  5129. /* Multiply right singular vectors of L in WORK(IU)
  5130. by Q in VT, storing result in A (Workspace: need
  5131. M*M) */
  5132. NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[iu], &ldwrku, &vt[vt_offset], ldvt, &c_b416,
  5133. &a[a_offset], lda);
  5134. /* Copy right singular vectors of A from A to VT */
  5135. NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5136. /* Copy left singular vectors of A from WORK(IR) to A
  5137. */
  5138. NUMlapack_dlacpy ("F", m, m, &work[ir], &ldwrkr, &a[a_offset], lda);
  5139. } else {
  5140. /* Insufficient workspace for a fast algorithm */
  5141. itau = 1;
  5142. iwork = itau + *m;
  5143. /* Compute A=L*Q, copying result to VT (Workspace:
  5144. need 2*M, prefer M+M*NB) */
  5145. i__2 = *lwork - iwork + 1;
  5146. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  5147. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5148. /* Generate Q in VT (Workspace: need M+N, prefer
  5149. M+N*NB) */
  5150. i__2 = *lwork - iwork + 1;
  5151. NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
  5152. &ierr);
  5153. ie = itau;
  5154. itauq = ie + *m;
  5155. itaup = itauq + *m;
  5156. iwork = itaup + *m;
  5157. /* Zero out above L in A */
  5158. i__2 = *m - 1;
  5159. i__3 = *m - 1;
  5160. NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &a_ref (1, 2), lda);
  5161. /* Bidiagonalize L in A (Workspace: need 4*M, prefer
  5162. 3*M+2*M*NB) */
  5163. i__2 = *lwork - iwork + 1;
  5164. NUMlapack_dgebrd (m, m, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
  5165. &work[iwork], &i__2, &ierr);
  5166. /* Multiply right bidiagonalizing vectors in A by Q
  5167. in VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
  5168. i__2 = *lwork - iwork + 1;
  5169. NUMlapack_dormbr ("P", "L", "T", m, n, m, &a[a_offset], lda, &work[itaup],
  5170. &vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
  5171. /* Generate left bidiagonalizing vectors in A
  5172. (Workspace: need 4*M, prefer 3*M+M*NB) */
  5173. i__2 = *lwork - iwork + 1;
  5174. NUMlapack_dorgbr ("Q", m, m, m, &a[a_offset], lda, &work[itauq], &work[iwork], &i__2,
  5175. &ierr);
  5176. iwork = ie + *m;
  5177. /* Perform bidiagonal QR iteration, computing left
  5178. singular vectors of A in A and computing right
  5179. singular vectors of A in VT (Workspace: need
  5180. BDSPAC) */
  5181. NUMlapack_dbdsqr ("U", m, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
  5182. &a[a_offset], lda, dum, &c__1, &work[iwork], info);
  5183. }
  5184. } else if (wntuas) {
  5185. /* Path 9t(N much larger than M, JOBU='S' or 'A',
  5186. JOBVT='A') N right singular vectors to be computed in
  5187. VT and M left singular vectors to be computed in U
  5188. Computing MAX */
  5189. i__2 = *n + *m, i__3 = *m << 2, i__2 = MAX (i__2, i__3);
  5190. if (*lwork >= *m * *m + MAX (i__2, bdspac)) {
  5191. /* Sufficient workspace for a fast algorithm */
  5192. iu = 1;
  5193. if (*lwork >= wrkbl + *lda * *m) {
  5194. /* WORK(IU) is LDA by M */
  5195. ldwrku = *lda;
  5196. } else {
  5197. /* WORK(IU) is M by M */
  5198. ldwrku = *m;
  5199. }
  5200. itau = iu + ldwrku * *m;
  5201. iwork = itau + *m;
  5202. /* Compute A=L*Q, copying result to VT (Workspace:
  5203. need M*M+2*M, prefer M*M+M+M*NB) */
  5204. i__2 = *lwork - iwork + 1;
  5205. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  5206. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5207. /* Generate Q in VT (Workspace: need M*M+M+N, prefer
  5208. M*M+M+N*NB) */
  5209. i__2 = *lwork - iwork + 1;
  5210. NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
  5211. &ierr);
  5212. /* Copy L to WORK(IU), zeroing out above it */
  5213. NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &work[iu], &ldwrku);
  5214. i__2 = *m - 1;
  5215. i__3 = *m - 1;
  5216. NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &work[iu + ldwrku], &ldwrku);
  5217. ie = itau;
  5218. itauq = ie + *m;
  5219. itaup = itauq + *m;
  5220. iwork = itaup + *m;
  5221. /* Bidiagonalize L in WORK(IU), copying result to U
  5222. (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */
  5223. i__2 = *lwork - iwork + 1;
  5224. NUMlapack_dgebrd (m, m, &work[iu], &ldwrku, &s[1], &work[ie], &work[itauq],
  5225. &work[itaup], &work[iwork], &i__2, &ierr);
  5226. NUMlapack_dlacpy ("L", m, m, &work[iu], &ldwrku, &u[u_offset], ldu);
  5227. /* Generate right bidiagonalizing vectors in WORK(IU)
  5228. (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB)
  5229. */
  5230. i__2 = *lwork - iwork + 1;
  5231. NUMlapack_dorgbr ("P", m, m, m, &work[iu], &ldwrku, &work[itaup], &work[iwork], &i__2,
  5232. &ierr);
  5233. /* Generate left bidiagonalizing vectors in U
  5234. (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB) */
  5235. i__2 = *lwork - iwork + 1;
  5236. NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2,
  5237. &ierr);
  5238. iwork = ie + *m;
  5239. /* Perform bidiagonal QR iteration, computing left
  5240. singular vectors of L in U and computing right
  5241. singular vectors of L in WORK(IU) (Workspace: need
  5242. M*M+BDSPAC) */
  5243. NUMlapack_dbdsqr ("U", m, m, m, &c__0, &s[1], &work[ie], &work[iu], &ldwrku,
  5244. &u[u_offset], ldu, dum, &c__1, &work[iwork], info);
  5245. /* Multiply right singular vectors of L in WORK(IU)
  5246. by Q in VT, storing result in A (Workspace: need
  5247. M*M) */
  5248. NUMblas_dgemm ("N", "N", m, n, m, &c_b438, &work[iu], &ldwrku, &vt[vt_offset], ldvt, &c_b416,
  5249. &a[a_offset], lda);
  5250. /* Copy right singular vectors of A from A to VT */
  5251. NUMlapack_dlacpy ("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5252. } else {
  5253. /* Insufficient workspace for a fast algorithm */
  5254. itau = 1;
  5255. iwork = itau + *m;
  5256. /* Compute A=L*Q, copying result to VT (Workspace:
  5257. need 2*M, prefer M+M*NB) */
  5258. i__2 = *lwork - iwork + 1;
  5259. NUMlapack_dgelqf (m, n, &a[a_offset], lda, &work[itau], &work[iwork], &i__2, &ierr);
  5260. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5261. /* Generate Q in VT (Workspace: need M+N, prefer
  5262. M+N*NB) */
  5263. i__2 = *lwork - iwork + 1;
  5264. NUMlapack_dorglq (n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[iwork], &i__2,
  5265. &ierr);
  5266. /* Copy L to U, zeroing out above it */
  5267. NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
  5268. i__2 = *m - 1;
  5269. i__3 = *m - 1;
  5270. NUMlapack_dlaset ("U", &i__2, &i__3, &c_b416, &c_b416, &u_ref (1, 2), ldu);
  5271. ie = itau;
  5272. itauq = ie + *m;
  5273. itaup = itauq + *m;
  5274. iwork = itaup + *m;
  5275. /* Bidiagonalize L in U (Workspace: need 4*M, prefer
  5276. 3*M+2*M*NB) */
  5277. i__2 = *lwork - iwork + 1;
  5278. NUMlapack_dgebrd (m, m, &u[u_offset], ldu, &s[1], &work[ie], &work[itauq], &work[itaup],
  5279. &work[iwork], &i__2, &ierr);
  5280. /* Multiply right bidiagonalizing vectors in U by Q
  5281. in VT (Workspace: need 3*M+N, prefer 3*M+N*NB) */
  5282. i__2 = *lwork - iwork + 1;
  5283. NUMlapack_dormbr ("P", "L", "T", m, n, m, &u[u_offset], ldu, &work[itaup],
  5284. &vt[vt_offset], ldvt, &work[iwork], &i__2, &ierr);
  5285. /* Generate left bidiagonalizing vectors in U
  5286. (Workspace: need 4*M, prefer 3*M+M*NB) */
  5287. i__2 = *lwork - iwork + 1;
  5288. NUMlapack_dorgbr ("Q", m, m, m, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2,
  5289. &ierr);
  5290. iwork = ie + *m;
  5291. /* Perform bidiagonal QR iteration, computing left
  5292. singular vectors of A in U and computing right
  5293. singular vectors of A in VT (Workspace: need
  5294. BDSPAC) */
  5295. NUMlapack_dbdsqr ("U", m, n, m, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
  5296. &u[u_offset], ldu, dum, &c__1, &work[iwork], info);
  5297. }
  5298. }
  5299. }
  5300. } else {
  5301. /* N .LT. MNTHR
  5302. Path 10t(N greater than M, but not much larger) Reduce to
  5303. bidiagonal form without LQ decomposition */
  5304. ie = 1;
  5305. itauq = ie + *m;
  5306. itaup = itauq + *m;
  5307. iwork = itaup + *m;
  5308. /* Bidiagonalize A (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */
  5309. i__2 = *lwork - iwork + 1;
  5310. NUMlapack_dgebrd (m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &work[itaup],
  5311. &work[iwork], &i__2, &ierr);
  5312. if (wntuas) {
  5313. /* If left singular vectors desired in U, copy result to U
  5314. and generate left bidiagonalizing vectors in U (Workspace:
  5315. need 4*M-1, prefer 3*M+(M-1)*NB) */
  5316. NUMlapack_dlacpy ("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
  5317. i__2 = *lwork - iwork + 1;
  5318. NUMlapack_dorgbr ("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[iwork], &i__2, &ierr);
  5319. }
  5320. if (wntvas) {
  5321. /* If right singular vectors desired in VT, copy result to VT
  5322. and generate right bidiagonalizing vectors in VT
  5323. (Workspace: need 3*M+NRVT, prefer 3*M+NRVT*NB) */
  5324. NUMlapack_dlacpy ("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
  5325. if (wntva) {
  5326. nrvt = *n;
  5327. }
  5328. if (wntvs) {
  5329. nrvt = *m;
  5330. }
  5331. i__2 = *lwork - iwork + 1;
  5332. NUMlapack_dorgbr ("P", &nrvt, n, m, &vt[vt_offset], ldvt, &work[itaup], &work[iwork], &i__2,
  5333. &ierr);
  5334. }
  5335. if (wntuo) {
  5336. /* If left singular vectors desired in A, generate left
  5337. bidiagonalizing vectors in A (Workspace: need 4*M-1,
  5338. prefer 3*M+(M-1)*NB) */
  5339. i__2 = *lwork - iwork + 1;
  5340. NUMlapack_dorgbr ("Q", m, m, n, &a[a_offset], lda, &work[itauq], &work[iwork], &i__2, &ierr);
  5341. }
  5342. if (wntvo) {
  5343. /* If right singular vectors desired in A, generate right
  5344. bidiagonalizing vectors in A (Workspace: need 4*M, prefer
  5345. 3*M+M*NB) */
  5346. i__2 = *lwork - iwork + 1;
  5347. NUMlapack_dorgbr ("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[iwork], &i__2, &ierr);
  5348. }
  5349. iwork = ie + *m;
  5350. if (wntuas || wntuo) {
  5351. nru = *m;
  5352. }
  5353. if (wntun) {
  5354. nru = 0;
  5355. }
  5356. if (wntvas || wntvo) {
  5357. ncvt = *n;
  5358. }
  5359. if (wntvn) {
  5360. ncvt = 0;
  5361. }
  5362. if (!wntuo && !wntvo) {
  5363. /* Perform bidiagonal QR iteration, if desired, computing
  5364. left singular vectors in U and computing right singular
  5365. vectors in VT (Workspace: need BDSPAC) */
  5366. NUMlapack_dbdsqr ("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
  5367. &u[u_offset], ldu, dum, &c__1, &work[iwork], info);
  5368. } else if (!wntuo && wntvo) {
  5369. /* Perform bidiagonal QR iteration, if desired, computing
  5370. left singular vectors in U and computing right singular
  5371. vectors in A (Workspace: need BDSPAC) */
  5372. NUMlapack_dbdsqr ("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &a[a_offset], lda, &u[u_offset],
  5373. ldu, dum, &c__1, &work[iwork], info);
  5374. } else {
  5375. /* Perform bidiagonal QR iteration, if desired, computing
  5376. left singular vectors in A and computing right singular
  5377. vectors in VT (Workspace: need BDSPAC) */
  5378. NUMlapack_dbdsqr ("L", m, &ncvt, &nru, &c__0, &s[1], &work[ie], &vt[vt_offset], ldvt,
  5379. &a[a_offset], lda, dum, &c__1, &work[iwork], info);
  5380. }
  5381. }
  5382. }
  5383. /* If DBDSQR failed to converge, copy unconverged superdiagonals to WORK(
  5384. 2:MINMN ) */
  5385. if (*info != 0) {
  5386. if (ie > 2) {
  5387. i__2 = minmn - 1;
  5388. for (i__ = 1; i__ <= i__2; ++i__) {
  5389. work[i__ + 1] = work[i__ + ie - 1];
  5390. /* L50: */
  5391. }
  5392. }
  5393. if (ie < 2) {
  5394. for (i__ = minmn - 1; i__ >= 1; --i__) {
  5395. work[i__ + 1] = work[i__ + ie - 1];
  5396. /* L60: */
  5397. }
  5398. }
  5399. }
  5400. /* Undo scaling if necessary */
  5401. if (iscl == 1) {
  5402. if (anrm > bignum) {
  5403. NUMlapack_dlascl ("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &minmn, &ierr);
  5404. }
  5405. if (*info != 0 && anrm > bignum) {
  5406. i__2 = minmn - 1;
  5407. NUMlapack_dlascl ("G", &c__0, &c__0, &bignum, &anrm, &i__2, &c__1, &work[2], &minmn, &ierr);
  5408. }
  5409. if (anrm < smlnum) {
  5410. NUMlapack_dlascl ("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &minmn, &ierr);
  5411. }
  5412. if (*info != 0 && anrm < smlnum) {
  5413. i__2 = minmn - 1;
  5414. NUMlapack_dlascl ("G", &c__0, &c__0, &smlnum, &anrm, &i__2, &c__1, &work[2], &minmn, &ierr);
  5415. }
  5416. }
  5417. /* Return optimal workspace in WORK(1) */
  5418. work[1] = (double) maxwrk;
  5419. return 0;
  5420. } /* NUMlapack_dgesvd */
  5421. #undef vt_ref
  5422. #undef u_ref
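/* Illustrative sketch (disabled): a minimal call of NUMlapack_dgesvd on a
   2-by-2 column-major matrix. This assumes the conventional reference
   argument order (jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work,
   lwork, info), which this translation appears to follow; the workspace
   length 10 meets the documented minimum MAX (3*MIN(M,N)+MAX(M,N),
   5*MIN(M,N)) for this size. */
#if 0
static void example_dgesvd (void) {
	double a [4] = { 4.0, 0.0, 0.0, 3.0 };   /* column-major 2 x 2 diagonal matrix */
	double s [2], u [4], vt [4], work [10];
	integer m = 2, n = 2, lda = 2, ldu = 2, ldvt = 2, lwork = 10, info;
	NUMlapack_dgesvd ("A", "A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, &info);
	/* On success info == 0 and s [0] >= s [1] >= 0 hold the singular values (here 4 and 3). */
}
#endif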
int NUMlapack_dgetf2 (integer *m, integer *n, double *a, integer *lda, integer *ipiv, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
static double c_b6 = -1.;
/* System generated locals */
integer a_dim1, a_offset, i__1, i__2, i__3;
double d__1;
/* Local variables */
static integer j;
static integer jp;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
--ipiv;
/* Function Body */
*info = 0;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*lda < MAX (1, *m)) {
*info = -4;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGETF2", &i__1);
return 0;
}
/* Quick return if possible */
if (*m == 0 || *n == 0) {
return 0;
}
i__1 = MIN (*m, *n);
for (j = 1; j <= i__1; ++j) {
/* Find pivot and test for singularity. */
i__2 = *m - j + 1;
jp = j - 1 + NUMblas_idamax (&i__2, &a_ref (j, j), &c__1);
ipiv[j] = jp;
if (a_ref (jp, j) != 0.) {
/* Apply the interchange to columns 1:N. */
if (jp != j) {
NUMblas_dswap (n, &a_ref (j, 1), lda, &a_ref (jp, 1), lda);
}
/* Compute elements J+1:M of J-th column. */
if (j < *m) {
i__2 = *m - j;
d__1 = 1. / a_ref (j, j);
NUMblas_dscal (&i__2, &d__1, &a_ref (j + 1, j), &c__1);
}
} else if (*info == 0) {
*info = j;
}
if (j < MIN (*m, *n)) {
/* Update trailing submatrix. */
i__2 = *m - j;
i__3 = *n - j;
NUMblas_dger (&i__2, &i__3, &c_b6, &a_ref (j + 1, j), &c__1, &a_ref (j, j + 1), lda, &a_ref (j + 1,
j + 1), lda);
}
/* L10: */
}
return 0;
} /* NUMlapack_dgetf2 */
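/* Illustrative sketch (disabled): calling the unblocked factorization above
   directly on a small column-major matrix. In practice NUMlapack_dgetrf is
   the entry point and invokes this routine on panels; the values in the
   comment were worked out by hand for this input. */
#if 0
static void example_dgetf2 (void) {
	double a [4] = { 1.0, 3.0, 2.0, 4.0 };   /* column-major: [1 2; 3 4] */
	integer m = 2, n = 2, lda = 2, ipiv [2], info;
	NUMlapack_dgetf2 (&m, &n, a, &lda, ipiv, &info);
	/* info == 0: a now holds L (unit lower triangle, stored below the diagonal)
	   and U (upper triangle); ipiv [0] == 2 records the row swap that partial
	   pivoting chose because |3| > |1| in the first column. */
}
#endif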
int NUMlapack_dgetri (integer *n, double *a, integer *lda, integer *ipiv, double *work, integer *lwork, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
static integer c_n1 = -1;
static integer c__2 = 2;
static double c_b20 = -1.;
static double c_b22 = 1.;
/* System generated locals */
integer a_dim1, a_offset, i__1, i__2, i__3;
/* Local variables */
static integer i__, j;
static integer nbmin;
static integer jb, nb, jj, jp, nn;
static integer ldwork;
static integer lwkopt;
static integer lquery;
static integer iws;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
--ipiv;
--work;
/* Function Body */
*info = 0;
nb = NUMlapack_ilaenv (&c__1, "DGETRI", " ", n, &c_n1, &c_n1, &c_n1, 6, 1);
lwkopt = *n * nb;
work[1] = (double) lwkopt;
lquery = *lwork == -1;
if (*n < 0) {
*info = -1;
} else if (*lda < MAX (1, *n)) {
*info = -3;
} else if (*lwork < MAX (1, *n) && !lquery) {
*info = -6;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGETRI", &i__1);
return 0;
} else if (lquery) {
return 0;
}
/* Quick return if possible */
if (*n == 0) {
return 0;
}
/* Form inv(U). If INFO > 0 from DTRTRI, then U is singular, and the
inverse is not computed. */
NUMlapack_dtrtri ("Upper", "Non-unit", n, &a[a_offset], lda, info);
if (*info > 0) {
return 0;
}
nbmin = 2;
ldwork = *n;
if (nb > 1 && nb < *n) {
/* Computing MAX */
i__1 = ldwork * nb;
iws = MAX (i__1, 1);
if (*lwork < iws) {
nb = *lwork / ldwork;
/* Computing MAX */
i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DGETRI", " ", n, &c_n1, &c_n1, &c_n1, 6, 1);
nbmin = MAX (i__1, i__2);
}
} else {
iws = *n;
}
/* Solve the equation inv(A)*L = inv(U) for inv(A). */
if (nb < nbmin || nb >= *n) {
/* Use unblocked code. */
for (j = *n; j >= 1; --j) {
/* Copy current column of L to WORK and replace with zeros. */
i__1 = *n;
for (i__ = j + 1; i__ <= i__1; ++i__) {
work[i__] = a_ref (i__, j);
a_ref (i__, j) = 0.;
/* L10: */
}
/* Compute current column of inv(A). */
if (j < *n) {
i__1 = *n - j;
NUMblas_dgemv ("No transpose", n, &i__1, &c_b20, &a_ref (1, j + 1), lda, &work[j + 1], &c__1, &c_b22,
&a_ref (1, j), &c__1);
}
/* L20: */
}
} else {
/* Use blocked code. */
nn = (*n - 1) / nb * nb + 1;
i__1 = -nb;
for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
/* Computing MIN */
i__2 = nb, i__3 = *n - j + 1;
jb = MIN (i__2, i__3);
/* Copy current block column of L to WORK and replace with zeros.
*/
i__2 = j + jb - 1;
for (jj = j; jj <= i__2; ++jj) {
i__3 = *n;
for (i__ = jj + 1; i__ <= i__3; ++i__) {
work[i__ + (jj - j) * ldwork] = a_ref (i__, jj);
a_ref (i__, jj) = 0.;
/* L30: */
}
/* L40: */
}
/* Compute current block column of inv(A). */
if (j + jb <= *n) {
i__2 = *n - j - jb + 1;
NUMblas_dgemm ("No transpose", "No transpose", n, &jb, &i__2, &c_b20, &a_ref (1, j + jb), lda,
&work[j + jb], &ldwork, &c_b22, &a_ref (1, j), lda);
}
NUMblas_dtrsm ("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b22, &work[j], &ldwork, &a_ref (1,
j), lda);
/* L50: */
}
}
/* Apply column interchanges. */
for (j = *n - 1; j >= 1; --j) {
jp = ipiv[j];
if (jp != j) {
NUMblas_dswap (n, &a_ref (1, j), &c__1, &a_ref (1, jp), &c__1);
}
/* L60: */
}
work[1] = (double) iws;
return 0;
} /* NUMlapack_dgetri */
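/* Illustrative sketch (disabled): inverting a 2 x 2 matrix by first factoring
   with NUMlapack_dgetrf (defined just below) and then calling the routine
   above. The inverse in the comment was checked by hand (det = 10). */
#if 0
static void example_dgetri (void) {
	double a [4] = { 4.0, 2.0, 7.0, 6.0 };   /* column-major: [4 7; 2 6] */
	double work [8];
	integer n = 2, lda = 2, ipiv [2], lwork = 8, info;
	NUMlapack_dgetrf (&n, &n, a, &lda, ipiv, &info);   /* LU factorization first */
	if (info == 0)
		NUMlapack_dgetri (&n, a, &lda, ipiv, work, &lwork, &info);
	/* a now holds inv(A) = [0.6 -0.7; -0.2 0.4] in column-major order. */
}
#endif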
int NUMlapack_dgetrf (integer *m, integer *n, double *a, integer *lda, integer *ipiv, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
static integer c_n1 = -1;
static double c_b16 = 1.;
static double c_b19 = -1.;
/* System generated locals */
integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
/* Local variables */
static integer i__, j;
static integer iinfo;
static integer jb, nb;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
--ipiv;
/* Function Body */
*info = 0;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*lda < MAX (1, *m)) {
*info = -4;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGETRF", &i__1);
return 0;
}
/* Quick return if possible */
if (*m == 0 || *n == 0) {
return 0;
}
/* Determine the block size for this environment. */
nb = NUMlapack_ilaenv (&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, 6, 1);
if (nb <= 1 || nb >= MIN (*m, *n)) {
/* Use unblocked code. */
NUMlapack_dgetf2 (m, n, &a[a_offset], lda, &ipiv[1], info);
} else {
/* Use blocked code. */
i__1 = MIN (*m, *n);
i__2 = nb;
for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
/* Computing MIN */
i__3 = MIN (*m, *n) - j + 1;
jb = MIN (i__3, nb);
/* Factor diagonal and subdiagonal blocks and test for exact
singularity. */
i__3 = *m - j + 1;
NUMlapack_dgetf2 (&i__3, &jb, &a_ref (j, j), lda, &ipiv[j], &iinfo);
/* Adjust INFO and the pivot indices. */
if (*info == 0 && iinfo > 0) {
*info = iinfo + j - 1;
}
/* Computing MIN */
i__4 = *m, i__5 = j + jb - 1;
i__3 = MIN (i__4, i__5);
for (i__ = j; i__ <= i__3; ++i__) {
ipiv[i__] = j - 1 + ipiv[i__];
/* L10: */
}
/* Apply interchanges to columns 1:J-1. */
i__3 = j - 1;
i__4 = j + jb - 1;
NUMlapack_dlaswp (&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
if (j + jb <= *n) {
/* Apply interchanges to columns J+JB:N. */
i__3 = *n - j - jb + 1;
i__4 = j + jb - 1;
NUMlapack_dlaswp (&i__3, &a_ref (1, j + jb), lda, &j, &i__4, &ipiv[1], &c__1);
/* Compute block row of U. */
i__3 = *n - j - jb + 1;
NUMblas_dtrsm ("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &c_b16, &a_ref (j, j), lda,
&a_ref (j, j + jb), lda);
if (j + jb <= *m) {
/* Update trailing submatrix. */
i__3 = *m - j - jb + 1;
i__4 = *n - j - jb + 1;
NUMblas_dgemm ("No transpose", "No transpose", &i__3, &i__4, &jb, &c_b19, &a_ref (j + jb, j),
lda, &a_ref (j, j + jb), lda, &c_b16, &a_ref (j + jb, j + jb), lda);
}
}
/* L20: */
}
}
return 0;
} /* NUMlapack_dgetrf */
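/* Illustrative sketch (disabled): how the INFO convention of the routine
   above reports exact singularity. The matrix was chosen by hand so that
   U(2,2) comes out exactly zero. */
#if 0
static void example_dgetrf (void) {
	/* A singular 2 x 2 matrix: the second column is twice the first. */
	double a [4] = { 1.0, 2.0, 2.0, 4.0 };
	integer m = 2, n = 2, lda = 2, ipiv [2], info;
	NUMlapack_dgetrf (&m, &n, a, &lda, ipiv, &info);
	/* info == 2: the factorization completed, but U(2,2) is exactly zero,
	   so U is singular and must not be used to solve a linear system. */
}
#endif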
int NUMlapack_dgetrs (const char *trans, integer *n, integer *nrhs, double *a, integer *lda, integer *ipiv, double *b, integer *ldb,
integer *info) {
/* Table of constant values */
static integer c__1 = 1;
static double c_b12 = 1.;
static integer c_n1 = -1;
/* System generated locals */
integer a_dim1, a_offset, b_dim1, b_offset, i__1;
/* Local variables */
static integer notran;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
--ipiv;
b_dim1 = *ldb;
b_offset = 1 + b_dim1 * 1;
b -= b_offset;
/* Function Body */
*info = 0;
notran = lsame_ (trans, "N");
if (!notran && !lsame_ (trans, "T") && !lsame_ (trans, "C")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*nrhs < 0) {
*info = -3;
} else if (*lda < MAX (1, *n)) {
*info = -5;
} else if (*ldb < MAX (1, *n)) {
*info = -8;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGETRS", &i__1);
return 0;
}
/* Quick return if possible */
if (*n == 0 || *nrhs == 0) {
return 0;
}
if (notran) {
/* Solve A * X = B.
Apply row interchanges to the right hand sides. */
NUMlapack_dlaswp (nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
/* Solve L*X = B, overwriting B with X. */
NUMblas_dtrsm ("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b12, &a[a_offset], lda, &b[b_offset],
ldb);
/* Solve U*X = B, overwriting B with X. */
NUMblas_dtrsm ("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b12, &a[a_offset], lda, &b[b_offset],
ldb);
} else {
/* Solve A' * X = B.
Solve U'*X = B, overwriting B with X. */
NUMblas_dtrsm ("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b12, &a[a_offset], lda, &b[b_offset],
ldb);
/* Solve L'*X = B, overwriting B with X. */
NUMblas_dtrsm ("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b12, &a[a_offset], lda, &b[b_offset], ldb);
/* Apply row interchanges to the solution vectors. */
NUMlapack_dlaswp (nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
}
return 0;
} /* NUMlapack_dgetrs */
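/* Illustrative sketch (disabled): the usual factor-then-solve pairing of
   NUMlapack_dgetrf and NUMlapack_dgetrs for a single right-hand side.
   The solution in the comment was verified by substitution. */
#if 0
static void example_dgetrs (void) {
	double a [4] = { 3.0, 1.0, 1.0, 2.0 };   /* column-major: [3 1; 1 2] */
	double b [2] = { 9.0, 8.0 };             /* right-hand side */
	integer n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv [2], info;
	NUMlapack_dgetrf (&n, &n, a, &lda, ipiv, &info);
	if (info == 0)
		NUMlapack_dgetrs ("N", &n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
	/* b now holds the solution x = { 2, 3 } of [3 1; 1 2] x = { 9, 8 }. */
}
#endif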
int NUMlapack_dggsvd (const char *jobu, const char *jobv, const char *jobq, integer *m, integer *n, integer *p, integer *k, integer *l,
double *a, integer *lda, double *b, integer *ldb, double *alpha, double *beta, double *u, integer *ldu, double *v,
integer *ldv, double *q, integer *ldq, double *work, integer *iwork, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
/* System generated locals */
integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2;
/* Local variables */
static integer ibnd;
static double tola;
static integer isub;
static double tolb, unfl, temp, smax;
static integer i__, j;
static double anorm, bnorm;
static integer wantq, wantu, wantv;
static integer ncycle;
static double ulp;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
b_dim1 = *ldb;
b_offset = 1 + b_dim1 * 1;
b -= b_offset;
--alpha;
--beta;
u_dim1 = *ldu;
u_offset = 1 + u_dim1 * 1;
u -= u_offset;
v_dim1 = *ldv;
v_offset = 1 + v_dim1 * 1;
v -= v_offset;
q_dim1 = *ldq;
q_offset = 1 + q_dim1 * 1;
q -= q_offset;
--work;
--iwork;
/* Function Body */
wantu = lsame_ (jobu, "U");
wantv = lsame_ (jobv, "V");
wantq = lsame_ (jobq, "Q");
*info = 0;
if (! (wantu || lsame_ (jobu, "N"))) {
*info = -1;
} else if (! (wantv || lsame_ (jobv, "N"))) {
*info = -2;
} else if (! (wantq || lsame_ (jobq, "N"))) {
*info = -3;
} else if (*m < 0) {
*info = -4;
} else if (*n < 0) {
*info = -5;
} else if (*p < 0) {
*info = -6;
} else if (*lda < MAX (1, *m)) {
*info = -10;
} else if (*ldb < MAX (1, *p)) {
*info = -12;
} else if (*ldu < 1 || wantu && *ldu < *m) {
*info = -16;
} else if (*ldv < 1 || wantv && *ldv < *p) {
*info = -18;
} else if (*ldq < 1 || wantq && *ldq < *n) {
*info = -20;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGGSVD", &i__1);
return 0;
}
/* Compute the 1-norm of matrices A and B */
anorm = NUMlapack_dlange ("1", m, n, &a[a_offset], lda, &work[1]);
bnorm = NUMlapack_dlange ("1", p, n, &b[b_offset], ldb, &work[1]);
/* Get machine precision and set up threshold for determining the
effective numerical rank of the matrices A and B. */
ulp = NUMblas_dlamch ("Precision");
unfl = NUMblas_dlamch ("Safe Minimum");
tola = MAX (*m, *n) * MAX (anorm, unfl) * ulp;
tolb = MAX (*p, *n) * MAX (bnorm, unfl) * ulp;
/* Preprocessing */
NUMlapack_dggsvp (jobu, jobv, jobq, m, p, n, &a[a_offset], lda, &b[b_offset], ldb, &tola, &tolb, k, l,
&u[u_offset], ldu, &v[v_offset], ldv, &q[q_offset], ldq, &iwork[1], &work[1], &work[*n + 1], info);
/* Compute the GSVD of two upper "triangular" matrices */
NUMlapack_dtgsja (jobu, jobv, jobq, m, p, n, k, l, &a[a_offset], lda, &b[b_offset], ldb, &tola, &tolb,
&alpha[1], &beta[1], &u[u_offset], ldu, &v[v_offset], ldv, &q[q_offset], ldq, &work[1], &ncycle,
info);
/* Sort the singular values and store the pivot indices in IWORK.
Copy ALPHA to WORK, then sort ALPHA in WORK. */
NUMblas_dcopy (n, &alpha[1], &c__1, &work[1], &c__1);
/* Computing MIN */
i__1 = *l, i__2 = *m - *k;
ibnd = MIN (i__1, i__2);
i__1 = ibnd;
for (i__ = 1; i__ <= i__1; ++i__) {
/* Scan for largest ALPHA(K+I) */
isub = i__;
smax = work[*k + i__];
i__2 = ibnd;
for (j = i__ + 1; j <= i__2; ++j) {
temp = work[*k + j];
if (temp > smax) {
isub = j;
smax = temp;
}
/* L10: */
}
if (isub != i__) {
work[*k + isub] = work[*k + i__];
work[*k + i__] = smax;
iwork[*k + i__] = *k + isub;
} else {
iwork[*k + i__] = *k + i__;
}
/* L20: */
}
return 0;
} /* NUMlapack_dggsvd */
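/* Illustrative sketch (disabled): a minimal generalized SVD of the pair
   (A, B) with B the identity, using the argument order of the routine above
   (note the m, n, p order). With B = I the generalized singular values
   alpha [i] / beta [i] reduce to the ordinary singular values of A. The
   workspace length 8 meets the reference requirement MAX (3*N, M, P) + N
   for this size. */
#if 0
static void example_dggsvd (void) {
	double a [4] = { 1.0, 3.0, 2.0, 4.0 };   /* 2 x 2, column-major */
	double b [4] = { 1.0, 0.0, 0.0, 1.0 };   /* B = identity */
	double alpha [2], beta [2], u [4], v [4], q [4], work [8];
	integer m = 2, n = 2, p = 2, k, l, lda = 2, ldb = 2, ldu = 2, ldv = 2, ldq = 2, iwork [2], info;
	NUMlapack_dggsvd ("U", "V", "Q", &m, &n, &p, &k, &l, a, &lda, b, &ldb,
		alpha, beta, u, &ldu, v, &ldv, q, &ldq, work, iwork, &info);
	/* On return k + l is the effective rank of the stacked matrix (A', B')'. */
}
#endif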
#define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
#define u_ref(a_1,a_2) u[(a_2)*u_dim1 + a_1]
#define v_ref(a_1,a_2) v[(a_2)*v_dim1 + a_1]
int NUMlapack_dggsvp (const char *jobu, const char *jobv, const char *jobq, integer *m, integer *p, integer *n, double *a, integer *lda,
double *b, integer *ldb, double *tola, double *tolb, integer *k, integer *l, double *u, integer *ldu, double *v,
integer *ldv, double *q, integer *ldq, integer *iwork, double *tau, double *work, integer *info) {
/* Table of constant values */
static double c_b12 = 0.;
static double c_b22 = 1.;
/* System generated locals */
integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
i__3;
double d__1;
/* Local variables */
static integer i__, j;
static integer wantq, wantu, wantv;
static integer forwrd;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
b_dim1 = *ldb;
b_offset = 1 + b_dim1 * 1;
b -= b_offset;
u_dim1 = *ldu;
u_offset = 1 + u_dim1 * 1;
u -= u_offset;
v_dim1 = *ldv;
v_offset = 1 + v_dim1 * 1;
v -= v_offset;
q_dim1 = *ldq;
q_offset = 1 + q_dim1 * 1;
q -= q_offset;
--iwork;
--tau;
--work;
/* Function Body */
wantu = lsame_ (jobu, "U");
wantv = lsame_ (jobv, "V");
wantq = lsame_ (jobq, "Q");
forwrd = TRUE;
*info = 0;
if (! (wantu || lsame_ (jobu, "N"))) {
*info = -1;
} else if (! (wantv || lsame_ (jobv, "N"))) {
*info = -2;
} else if (! (wantq || lsame_ (jobq, "N"))) {
*info = -3;
} else if (*m < 0) {
*info = -4;
} else if (*p < 0) {
*info = -5;
} else if (*n < 0) {
*info = -6;
} else if (*lda < MAX (1, *m)) {
*info = -8;
} else if (*ldb < MAX (1, *p)) {
*info = -10;
} else if (*ldu < 1 || wantu && *ldu < *m) {
*info = -16;
} else if (*ldv < 1 || wantv && *ldv < *p) {
*info = -18;
} else if (*ldq < 1 || wantq && *ldq < *n) {
*info = -20;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DGGSVP", &i__1);
return 0;
}
/* QR with column pivoting of B:
      B*P = V*( S11 S12 )
              (  0   0  ) */
i__1 = *n;
for (i__ = 1; i__ <= i__1; ++i__) {
iwork[i__] = 0;
/* L10: */
}
NUMlapack_dgeqpf (p, n, &b[b_offset], ldb, &iwork[1], &tau[1], &work[1], info);
/* Update A := A*P */
NUMlapack_dlapmt (&forwrd, m, n, &a[a_offset], lda, &iwork[1]);
/* Determine the effective rank of matrix B. */
*l = 0;
i__1 = MIN (*p, *n);
for (i__ = 1; i__ <= i__1; ++i__) {
if ( (d__1 = b_ref (i__, i__), fabs (d__1)) > *tolb) {
++ (*l);
}
/* L20: */
}
if (wantv) {
/* Copy the details of V, and form V. */
NUMlapack_dlaset ("Full", p, p, &c_b12, &c_b12, &v[v_offset], ldv);
if (*p > 1) {
i__1 = *p - 1;
NUMlapack_dlacpy ("Lower", &i__1, n, &b_ref (2, 1), ldb, &v_ref (2, 1), ldv);
}
i__1 = MIN (*p, *n);
NUMlapack_dorg2r (p, p, &i__1, &v[v_offset], ldv, &tau[1], &work[1], info);
}
/* Clean up B */
i__1 = *l - 1;
for (j = 1; j <= i__1; ++j) {
i__2 = *l;
for (i__ = j + 1; i__ <= i__2; ++i__) {
b_ref (i__, j) = 0.;
/* L30: */
}
/* L40: */
}
if (*p > *l) {
i__1 = *p - *l;
NUMlapack_dlaset ("Full", &i__1, n, &c_b12, &c_b12, &b_ref (*l + 1, 1), ldb);
}
if (wantq) {
/* Set Q = I and Update Q := Q*P */
NUMlapack_dlaset ("Full", n, n, &c_b12, &c_b22, &q[q_offset], ldq);
NUMlapack_dlapmt (&forwrd, n, n, &q[q_offset], ldq, &iwork[1]);
}
if (*p >= *l && *n != *l) {
/* RQ factorization of (S11 S12): ( S11 S12 ) = ( 0 S12 )*Z */
NUMlapack_dgerq2 (l, n, &b[b_offset], ldb, &tau[1], &work[1], info);
/* Update A := A*Z' */
NUMlapack_dormr2 ("Right", "Transpose", m, n, l, &b[b_offset], ldb, &tau[1], &a[a_offset], lda,
&work[1], info);
if (wantq) {
/* Update Q := Q*Z' */
NUMlapack_dormr2 ("Right", "Transpose", n, n, l, &b[b_offset], ldb, &tau[1], &q[q_offset], ldq,
&work[1], info);
}
/* Clean up B */
i__1 = *n - *l;
NUMlapack_dlaset ("Full", l, &i__1, &c_b12, &c_b12, &b[b_offset], ldb);
i__1 = *n;
for (j = *n - *l + 1; j <= i__1; ++j) {
i__2 = *l;
for (i__ = j - *n + *l + 1; i__ <= i__2; ++i__) {
b_ref (i__, j) = 0.;
/* L50: */
}
/* L60: */
}
}
/* Let        N-L  L
       A = ( A11  A12 ) M,
   then the following does the complete QR decomposition of A11:
       A11 = U*( 0 T12 )*P1'
               ( 0  0  ) */
i__1 = *n - *l;
for (i__ = 1; i__ <= i__1; ++i__) {
iwork[i__] = 0;
/* L70: */
}
i__1 = *n - *l;
NUMlapack_dgeqpf (m, &i__1, &a[a_offset], lda, &iwork[1], &tau[1], &work[1], info);
/* Determine the effective rank of A11 */
*k = 0;
/* Computing MIN */
i__2 = *m, i__3 = *n - *l;
i__1 = MIN (i__2, i__3);
for (i__ = 1; i__ <= i__1; ++i__) {
if ( (d__1 = a_ref (i__, i__), fabs (d__1)) > *tola) {
++ (*k);
}
/* L80: */
}
/* Update A12 := U'*A12, where A12 = A( 1:M, N-L+1:N )
Computing MIN */
i__2 = *m, i__3 = *n - *l;
i__1 = MIN (i__2, i__3);
NUMlapack_dorm2r ("Left", "Transpose", m, l, &i__1, &a[a_offset], lda, &tau[1], &a_ref (1, *n - *l + 1),
lda, &work[1], info);
if (wantu) {
/* Copy the details of U, and form U */
NUMlapack_dlaset ("Full", m, m, &c_b12, &c_b12, &u[u_offset], ldu);
if (*m > 1) {
i__1 = *m - 1;
i__2 = *n - *l;
NUMlapack_dlacpy ("Lower", &i__1, &i__2, &a_ref (2, 1), lda, &u_ref (2, 1), ldu);
}
/* Computing MIN */
i__2 = *m, i__3 = *n - *l;
i__1 = MIN (i__2, i__3);
NUMlapack_dorg2r (m, m, &i__1, &u[u_offset], ldu, &tau[1], &work[1], info);
}
if (wantq) {
/* Update Q( 1:N, 1:N-L ) = Q( 1:N, 1:N-L )*P1 */
i__1 = *n - *l;
NUMlapack_dlapmt (&forwrd, n, &i__1, &q[q_offset], ldq, &iwork[1]);
}
/* Clean up A: set the strictly lower triangular part of A(1:K, 1:K) = 0,
and A( K+1:M, 1:N-L ) = 0. */
i__1 = *k - 1;
for (j = 1; j <= i__1; ++j) {
i__2 = *k;
for (i__ = j + 1; i__ <= i__2; ++i__) {
a_ref (i__, j) = 0.;
/* L90: */
}
/* L100: */
}
if (*m > *k) {
i__1 = *m - *k;
i__2 = *n - *l;
NUMlapack_dlaset ("Full", &i__1, &i__2, &c_b12, &c_b12, &a_ref (*k + 1, 1), lda);
}
if (*n - *l > *k) {
/* RQ factorization of ( T11 T12 ) = ( 0 T12 )*Z1 */
i__1 = *n - *l;
NUMlapack_dgerq2 (k, &i__1, &a[a_offset], lda, &tau[1], &work[1], info);
if (wantq) {
/* Update Q( 1:N,1:N-L ) = Q( 1:N,1:N-L )*Z1' */
i__1 = *n - *l;
NUMlapack_dormr2 ("Right", "Transpose", n, &i__1, k, &a[a_offset], lda, &tau[1], &q[q_offset], ldq,
&work[1], info);
}
/* Clean up A */
i__1 = *n - *l - *k;
NUMlapack_dlaset ("Full", k, &i__1, &c_b12, &c_b12, &a[a_offset], lda);
i__1 = *n - *l;
for (j = *n - *l - *k + 1; j <= i__1; ++j) {
i__2 = *k;
for (i__ = j - *n + *l + *k + 1; i__ <= i__2; ++i__) {
a_ref (i__, j) = 0.;
/* L110: */
}
/* L120: */
}
}
if (*m > *k) {
/* QR factorization of A( K+1:M,N-L+1:N ) */
i__1 = *m - *k;
NUMlapack_dgeqr2 (&i__1, l, &a_ref (*k + 1, *n - *l + 1), lda, &tau[1], &work[1], info);
if (wantu) {
/* Update U(:,K+1:M) := U(:,K+1:M)*U1 */
i__1 = *m - *k;
/* Computing MIN */
i__3 = *m - *k;
i__2 = MIN (i__3, *l);
NUMlapack_dorm2r ("Right", "No transpose", m, &i__1, &i__2, &a_ref (*k + 1, *n - *l + 1), lda,
&tau[1], &u_ref (1, *k + 1), ldu, &work[1], info);
}
/* Clean up */
i__1 = *n;
for (j = *n - *l + 1; j <= i__1; ++j) {
i__2 = *m;
for (i__ = j - *n + *k + *l + 1; i__ <= i__2; ++i__) {
a_ref (i__, j) = 0.;
/* L130: */
}
/* L140: */
}
}
return 0;
} /* NUMlapack_dggsvp */
#undef v_ref
#undef u_ref
#undef b_ref
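/* Illustrative sketch (disabled): calling the preprocessing routine above
   directly (normally NUMlapack_dggsvd does this). Note its m, p, n argument
   order, and that the thresholds tola and tolb mirror the setup in
   NUMlapack_dggsvd; the workspace length 8 is a generous choice for this
   size, not a documented minimum. */
#if 0
static void example_dggsvp (void) {
	double a [4] = { 1.0, 3.0, 2.0, 4.0 }, b [4] = { 1.0, 0.0, 0.0, 1.0 };
	double u [4], v [4], q [4], tau [2], work [8];
	integer m = 2, p = 2, n = 2, k, l, lda = 2, ldb = 2, ldu = 2, ldv = 2, ldq = 2, iwork [2], info;
	/* Rank-decision thresholds, as computed in NUMlapack_dggsvd above. */
	double ulp = NUMblas_dlamch ("Precision"), unfl = NUMblas_dlamch ("Safe Minimum");
	double anorm = NUMlapack_dlange ("1", &m, &n, a, &lda, work);
	double bnorm = NUMlapack_dlange ("1", &p, &n, b, &ldb, work);
	double tola = MAX (m, n) * MAX (anorm, unfl) * ulp;
	double tolb = MAX (p, n) * MAX (bnorm, unfl) * ulp;
	NUMlapack_dggsvp ("U", "V", "Q", &m, &p, &n, a, &lda, b, &ldb, &tola, &tolb,
		&k, &l, u, &ldu, v, &ldv, q, &ldq, iwork, tau, work, &info);
	/* On return K and L are the effective ranks consumed by NUMlapack_dtgsja. */
}
#endif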
  6133. int NUMlapack_dhseqr (const char *job, const char *compz, integer *n, integer *ilo, integer *ihi, double *h__, integer *ldh,
  6134. double *wr, double *wi, double *z__, integer *ldz, double *work, integer *lwork, integer *info) {
  6135. /* Table of constant values */
  6136. static double c_b9 = 0.;
  6137. static double c_b10 = 1.;
  6138. static integer c__4 = 4;
  6139. static integer c_n1 = -1;
  6140. static integer c__2 = 2;
  6141. static integer c__8 = 8;
  6142. static integer c__15 = 15;
  6143. static int c_false = FALSE;
  6144. static integer c__1 = 1;
  6145. /* System generated locals */
  6146. const char *a__1[2];
  6147. integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4, i__5;
  6148. double d__1, d__2;
  6149. char ch__1[2];
  6150. /* Local variables */
  6151. static integer maxb;
  6152. static double absw;
  6153. static integer ierr;
  6154. static double unfl, temp, ovfl;
  6155. static integer i__, j, k, l;
  6156. static double s[225] /* was [15][15] */ , v[16];
  6157. static integer itemp;
  6158. static integer i1, i2;
  6159. static int initz, wantt, wantz;
  6160. static integer ii, nh;
  6161. static integer nr, ns;
  6162. static integer nv;
  6163. static double vv[16];
  6164. static double smlnum;
  6165. static int lquery;
  6166. static integer itn;
  6167. static double tau;
  6168. static integer its;
  6169. static double ulp, tst1;
  6170. #define h___ref(a_1,a_2) h__[(a_2)*h_dim1 + a_1]
  6171. #define s_ref(a_1,a_2) s[(a_2)*15 + a_1 - 16]
  6172. #define z___ref(a_1,a_2) z__[(a_2)*z_dim1 + a_1]
  6173. h_dim1 = *ldh;
  6174. h_offset = 1 + h_dim1 * 1;
  6175. h__ -= h_offset;
  6176. --wr;
  6177. --wi;
  6178. z_dim1 = *ldz;
  6179. z_offset = 1 + z_dim1 * 1;
  6180. z__ -= z_offset;
  6181. --work;
  6182. /* Function Body */
  6183. wantt = lsame_ (job, "S");
  6184. initz = lsame_ (compz, "I");
  6185. wantz = initz || lsame_ (compz, "V");
  6186. *info = 0;
  6187. work[1] = (double) MAX (1, *n);
  6188. lquery = *lwork == -1;
  6189. if (!lsame_ (job, "E") && !wantt) {
  6190. *info = -1;
  6191. } else if (!lsame_ (compz, "N") && !wantz) {
  6192. *info = -2;
  6193. } else if (*n < 0) {
  6194. *info = -3;
  6195. } else if (*ilo < 1 || *ilo > MAX (1, *n)) {
  6196. *info = -4;
  6197. } else if (*ihi < MIN (*ilo, *n) || *ihi > *n) {
  6198. *info = -5;
  6199. } else if (*ldh < MAX (1, *n)) {
  6200. *info = -7;
  6201. } else if (*ldz < 1 || wantz && *ldz < MAX (1, *n)) {
  6202. *info = -11;
  6203. } else if (*lwork < MAX (1, *n) && !lquery) {
  6204. *info = -13;
  6205. }
  6206. if (*info != 0) {
  6207. i__1 = - (*info);
  6208. xerbla_ ("NUMlapack_dhseqr ", &i__1);
  6209. return 0;
  6210. } else if (lquery) {
  6211. return 0;
  6212. }
  6213. /* Initialize Z, if necessary */
  6214. if (initz) {
  6215. NUMlapack_dlaset ("Full", n, n, &c_b9, &c_b10, &z__[z_offset], ldz);
  6216. }
  6217. /* Store the eigenvalues isolated by NUMlapack_dgebal. */
  6218. i__1 = *ilo - 1;
  6219. for (i__ = 1; i__ <= i__1; ++i__) {
  6220. wr[i__] = h___ref (i__, i__);
  6221. wi[i__] = 0.;
  6222. /* L10: */
  6223. }
  6224. i__1 = *n;
  6225. for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
  6226. wr[i__] = h___ref (i__, i__);
  6227. wi[i__] = 0.;
  6228. /* L20: */
  6229. }
  6230. /* Quick return if possible. */
  6231. if (*n == 0) {
  6232. return 0;
  6233. }
  6234. if (*ilo == *ihi) {
  6235. wr[*ilo] = h___ref (*ilo, *ilo);
  6236. wi[*ilo] = 0.;
  6237. return 0;
  6238. }
  6239. /* Set rows and columns ILO to IHI to zero below the first subdiagonal. */
  6240. i__1 = *ihi - 2;
  6241. for (j = *ilo; j <= i__1; ++j) {
  6242. i__2 = *n;
  6243. for (i__ = j + 2; i__ <= i__2; ++i__) {
  6244. h___ref (i__, j) = 0.;
  6245. /* L30: */
  6246. }
  6247. /* L40: */
  6248. }
  6249. nh = *ihi - *ilo + 1;
  6250. /* Determine the order of the multi-shift QR algorithm to be used.
  6251. Writing concatenation */
  6252. i__3[0] = 1, a__1[0] = job;
  6253. i__3[1] = 1, a__1[1] = compz;
  6254. s_cat ( (char *) ch__1, a__1, i__3, &c__2, 2);
  6255. ns = NUMlapack_ilaenv (&c__4, "NUMlapack_dhseqr ", ch__1, n, ilo, ihi, &c_n1, 6, 2);
  6256. /* Writing concatenation */
  6257. i__3[0] = 1, a__1[0] = job;
  6258. i__3[1] = 1, a__1[1] = compz;
  6259. s_cat (ch__1, a__1, i__3, &c__2, 2);
  6260. maxb = NUMlapack_ilaenv (&c__8, "NUMlapack_dhseqr ", ch__1, n, ilo, ihi, &c_n1, 6, 2);
  6261. if (ns <= 2 || ns > nh || maxb >= nh) {
  6262. /* Use the standard double-shift algorithm */
  6263. NUMlapack_dlahqr (&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi,
  6264. &z__[z_offset], ldz, info);
  6265. return 0;
  6266. }
  6267. maxb = MAX (3, maxb);
  6268. /* Computing MIN */
  6269. i__1 = MIN (ns, maxb);
  6270. ns = MIN (i__1, 15);
  6271. /* Now 2 < NS <= MAXB < NH.
  6272. Set machine-dependent constants for the stopping criterion. If norm(H)
  6273. <= sqrt(OVFL), overflow should not occur. */
  6274. unfl = NUMblas_dlamch ("Safe minimum");
  6275. ovfl = 1. / unfl;
  6276. NUMlapack_dlabad (&unfl, &ovfl);
  6277. ulp = NUMblas_dlamch ("Precision");
  6278. smlnum = unfl * (nh / ulp);
  6279. /* I1 and I2 are the indices of the first row and last column of H to
  6280. which transformations should be applied. If eigenvalues only are being
  6281. computed, I1 and I2 are set inside the main loop. */
  6282. if (wantt) {
  6283. i1 = 1;
  6284. i2 = *n;
  6285. }
  6286. /* ITN is the total number of multiple-shift QR iterations allowed. */
  6287. itn = nh * 30;
  6288. /* The main loop begins here. I is the loop index and decreases from IHI
  6289. to ILO in steps of at most MAXB. Each iteration of the loop works with
  6290. the active submatrix in rows and columns L to I. Eigenvalues I+1 to IHI
  6291. have already converged. Either L = ILO or H(L,L-1) is negligible so
  6292. that the matrix splits. */
  6293. i__ = *ihi;
  6294. L50:
  6295. l = *ilo;
  6296. if (i__ < *ilo) {
  6297. goto L170;
  6298. }
  6299. /* Perform multiple-shift QR iterations on rows and columns ILO to I
  6300. until a submatrix of order at most MAXB splits off at the bottom
  6301. because a subdiagonal element has become negligible. */
  6302. i__1 = itn;
  6303. for (its = 0; its <= i__1; ++its) {
  6304. /* Look for a single small subdiagonal element. */
  6305. i__2 = l + 1;
  6306. for (k = i__; k >= i__2; --k) {
  6307. tst1 = (d__1 = h___ref (k - 1, k - 1), fabs (d__1)) + (d__2 = h___ref (k, k), fabs (d__2));
  6308. if (tst1 == 0.) {
  6309. i__4 = i__ - l + 1;
  6310. tst1 = NUMlapack_dlanhs ("1", &i__4, &h___ref (l, l), ldh, &work[1]);
  6311. }
  6312. /* Computing MAX */
  6313. d__2 = ulp * tst1;
  6314. if ( (d__1 = h___ref (k, k - 1), fabs (d__1)) <= MAX (d__2, smlnum)) {
  6315. goto L70;
  6316. }
  6317. /* L60: */
  6318. }
  6319. L70:
  6320. l = k;
  6321. if (l > *ilo) {
  6322. /* H(L,L-1) is negligible. */
  6323. h___ref (l, l - 1) = 0.;
  6324. }
  6325. /* Exit from loop if a submatrix of order <= MAXB has split off. */
  6326. if (l >= i__ - maxb + 1) {
  6327. goto L160;
  6328. }
  6329. /* Now the active submatrix is in rows and columns L to I. If
  6330. eigenvalues only are being computed, only the active submatrix
  6331. need be transformed. */
  6332. if (!wantt) {
  6333. i1 = l;
  6334. i2 = i__;
  6335. }
  6336. if (its == 20 || its == 30) {
  6337. /* Exceptional shifts. */
  6338. i__2 = i__;
  6339. for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
  6340. wr[ii] = ( (d__1 = h___ref (ii, ii - 1), fabs (d__1)) + (d__2 =
  6341. h___ref (ii, ii), fabs (d__2))) * 1.5;
  6342. wi[ii] = 0.;
  6343. /* L80: */
  6344. }
  6345. } else {
  6346. /* Use eigenvalues of trailing submatrix of order NS as shifts. */
  6347. NUMlapack_dlacpy ("Full", &ns, &ns, &h___ref (i__ - ns + 1, i__ - ns + 1), ldh, s, &c__15);
  6348. NUMlapack_dlahqr (&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ - ns + 1],
  6349. &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset], ldz, &ierr);
  6350. if (ierr > 0) {
  6351. /* If NUMlapack_dlahqr failed to compute all NS eigenvalues,
  6352. use the unconverged diagonal elements as the remaining
  6353. shifts. */
  6354. i__2 = ierr;
  6355. for (ii = 1; ii <= i__2; ++ii) {
  6356. wr[i__ - ns + ii] = s_ref (ii, ii);
  6357. wi[i__ - ns + ii] = 0.;
  6358. /* L90: */
  6359. }
  6360. }
  6361. }
  6362. /* Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns)) where G
  6363. is the Hessenberg submatrix H(L:I,L:I) and w is the vector of
  6364. shifts (stored in WR and WI). The result is stored in the local
  6365. array V. */
  6366. v[0] = 1.;
  6367. i__2 = ns + 1;
  6368. for (ii = 2; ii <= i__2; ++ii) {
  6369. v[ii - 1] = 0.;
  6370. /* L100: */
  6371. }
  6372. nv = 1;
  6373. i__2 = i__;
  6374. for (j = i__ - ns + 1; j <= i__2; ++j) {
  6375. if (wi[j] >= 0.) {
  6376. if (wi[j] == 0.) {
  6377. /* real shift */
  6378. i__4 = nv + 1;
  6379. NUMblas_dcopy (&i__4, v, &c__1, vv, &c__1);
  6380. i__4 = nv + 1;
  6381. d__1 = -wr[j];
  6382. NUMblas_dgemv ("No transpose", &i__4, &nv, &c_b10, &h___ref (l, l), ldh, vv, &c__1,
  6383. &d__1, v, &c__1);
  6384. ++nv;
  6385. } else if (wi[j] > 0.) {
  6386. /* complex conjugate pair of shifts */
  6387. i__4 = nv + 1;
  6388. NUMblas_dcopy (&i__4, v, &c__1, vv, &c__1);
  6389. i__4 = nv + 1;
  6390. d__1 = wr[j] * -2.;
  6391. NUMblas_dgemv ("No transpose", &i__4, &nv, &c_b10, &h___ref (l, l), ldh, v, &c__1, &d__1,
  6392. vv, &c__1);
  6393. i__4 = nv + 1;
  6394. itemp = NUMblas_idamax (&i__4, vv, &c__1);
  6395. /* Computing MAX */
  6396. d__2 = (d__1 = vv[itemp - 1], fabs (d__1));
  6397. temp = 1. / MAX (d__2, smlnum);
  6398. i__4 = nv + 1;
  6399. NUMblas_dscal (&i__4, &temp, vv, &c__1);
  6400. absw = NUMlapack_dlapy2 (&wr[j], &wi[j]);
  6401. temp = temp * absw * absw;
  6402. i__4 = nv + 2;
  6403. i__5 = nv + 1;
  6404. NUMblas_dgemv ("No transpose", &i__4, &i__5, &c_b10, &h___ref (l, l), ldh, vv, &c__1,
  6405. &temp, v, &c__1);
  6406. nv += 2;
  6407. }
  6408. /* Scale V(1:NV) so that MAX (fabs (V(i))) = 1. If V is zero,
  6409. reset it to the unit vector. */
  6410. itemp = NUMblas_idamax (&nv, v, &c__1);
  6411. temp = (d__1 = v[itemp - 1], fabs (d__1));
  6412. if (temp == 0.) {
  6413. v[0] = 1.;
  6414. i__4 = nv;
  6415. for (ii = 2; ii <= i__4; ++ii) {
  6416. v[ii - 1] = 0.;
  6417. /* L110: */
  6418. }
  6419. } else {
  6420. temp = MAX (temp, smlnum);
  6421. d__1 = 1. / temp;
  6422. NUMblas_dscal (&nv, &d__1, v, &c__1);
  6423. }
  6424. }
  6425. /* L120: */
  6426. }
  6427. /* Multiple-shift QR step */
  6428. i__2 = i__ - 1;
  6429. for (k = l; k <= i__2; ++k) {
  6430. /* The first iteration of this loop determines a reflection G
  6431. from the vector V and applies it from left and right to H,
  6432. thus creating a nonzero bulge below the subdiagonal.
  6433. Each subsequent iteration determines a reflection G to restore
  6434. the Hessenberg form in the (K-1)th column, and thus chases the
  6435. bulge one step toward the bottom of the active submatrix. NR
  6436. is the order of G.
  6437. Computing MIN */
  6438. i__4 = ns + 1, i__5 = i__ - k + 1;
  6439. nr = MIN (i__4, i__5);
  6440. if (k > l) {
  6441. NUMblas_dcopy (&nr, &h___ref (k, k - 1), &c__1, v, &c__1);
  6442. }
  6443. NUMlapack_dlarfg (&nr, v, &v[1], &c__1, &tau);
  6444. if (k > l) {
  6445. h___ref (k, k - 1) = v[0];
  6446. i__4 = i__;
  6447. for (ii = k + 1; ii <= i__4; ++ii) {
  6448. h___ref (ii, k - 1) = 0.;
  6449. /* L130: */
  6450. }
  6451. }
  6452. v[0] = 1.;
  6453. /* Apply G from the left to transform the rows of the matrix in
  6454. columns K to I2. */
  6455. i__4 = i2 - k + 1;
  6456. NUMlapack_dlarfx ("Left", &nr, &i__4, v, &tau, &h___ref (k, k), ldh, &work[1]);
  6457. /* Apply G from the right to transform the columns of the matrix
  6458. in rows I1 to MIN (K+NR,I).
  6459. Computing MIN */
  6460. i__5 = k + nr;
  6461. i__4 = MIN (i__5, i__) - i1 + 1;
  6462. NUMlapack_dlarfx ("Right", &i__4, &nr, v, &tau, &h___ref (i1, k), ldh, &work[1]);
  6463. if (wantz) {
  6464. /* Accumulate transformations in the matrix Z */
  6465. NUMlapack_dlarfx ("Right", &nh, &nr, v, &tau, &z___ref (*ilo, k), ldz, &work[1]);
  6466. }
  6467. /* L140: */
  6468. }
  6469. /* L150: */
  6470. }
  6471. /* Failure to converge in remaining number of iterations */
  6472. *info = i__;
  6473. return 0;
  6474. L160:
  6475. /* A submatrix of order <= MAXB in rows and columns L to I has split off.
  6476. Use the double-shift QR algorithm to handle it. */
  6477. NUMlapack_dlahqr (&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi,
  6478. &z__[z_offset], ldz, info);
  6479. if (*info > 0) {
  6480. return 0;
  6481. }
  6482. /* Decrement number of remaining iterations, and return to start of the
  6483. main loop with a new value of I. */
  6484. itn -= its;
  6485. i__ = l - 1;
  6486. goto L50;
  6487. L170:
  6488. work[1] = (double) MAX (1, *n);
  6489. return 0;
  6490. } /* NUMlapack_dhseqr */
  6491. #undef z___ref
  6492. #undef s_ref
  6493. #undef h___ref
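/* Added note (not in the original source): NUMlapack_dhseqr above is the
   multishift QR driver; whenever a submatrix of order <= MAXB splits off,
   it is finished by the double-shift kernel NUMlapack_dlahqr defined below. */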
  6494. int NUMlapack_dlabad (double *smal, double *large) {
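	/* Added note (not in the original source): on machines with an enormous
	   exponent range, i.e. log10 (*large) > 2000 as on historical Cray
	   arithmetic, take square roots of SMALL and LARGE so that later scaling
	   by their product cannot over- or underflow; on IEEE machines this is
	   a no-op. */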
  6495. if (log10 (*large) > 2e3) {
  6496. *smal = sqrt (*smal);
  6497. *large = sqrt (*large);
  6498. }
  6499. return 0;
  6500. } /* NUMlapack_dlabad */
  6501. #define x_ref(a_1,a_2) x[(a_2)*x_dim1 + a_1]
  6502. #define y_ref(a_1,a_2) y[(a_2)*y_dim1 + a_1]
  6503. int NUMlapack_dlabrd (integer *m, integer *n, integer *nb, double *a, integer *lda, double *d__, double *e, double *tauq,
  6504. double *taup, double *x, integer *ldx, double *y, integer *ldy) {
  6505. /* Table of constant values */
  6506. static double c_b4 = -1.;
  6507. static double c_b5 = 1.;
  6508. static integer c__1 = 1;
  6509. static double c_b16 = 0.;
  6510. /* System generated locals */
  6511. integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, i__3;
  6512. /* Local variables */
  6513. static integer i__;
  6514. a_dim1 = *lda;
  6515. a_offset = 1 + a_dim1 * 1;
  6516. a -= a_offset;
  6517. --d__;
  6518. --e;
  6519. --tauq;
  6520. --taup;
  6521. x_dim1 = *ldx;
  6522. x_offset = 1 + x_dim1 * 1;
  6523. x -= x_offset;
  6524. y_dim1 = *ldy;
  6525. y_offset = 1 + y_dim1 * 1;
  6526. y -= y_offset;
  6527. /* Function Body */
  6528. if (*m <= 0 || *n <= 0) {
  6529. return 0;
  6530. }
  6531. if (*m >= *n) {
  6532. /* Reduce to upper bidiagonal form */
  6533. i__1 = *nb;
  6534. for (i__ = 1; i__ <= i__1; ++i__) {
  6535. /* Update A(i:m,i) */
  6536. i__2 = *m - i__ + 1;
  6537. i__3 = i__ - 1;
  6538. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &a_ref (i__, 1), lda, &y_ref (i__, 1), ldy, &c_b5,
  6539. &a_ref (i__, i__), &c__1);
  6540. i__2 = *m - i__ + 1;
  6541. i__3 = i__ - 1;
  6542. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &x_ref (i__, 1), ldx, &a_ref (1, i__), &c__1, &c_b5,
  6543. &a_ref (i__, i__), &c__1);
  6544. /* Generate reflection Q(i) to annihilate A(i+1:m,i)
  6545. Computing MIN */
  6546. i__2 = i__ + 1;
  6547. i__3 = *m - i__ + 1;
  6548. NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (MIN (i__2, *m), i__), &c__1, &tauq[i__]);
  6549. d__[i__] = a_ref (i__, i__);
  6550. if (i__ < *n) {
  6551. a_ref (i__, i__) = 1.;
  6552. /* Compute Y(i+1:n,i) */
  6553. i__2 = *m - i__ + 1;
  6554. i__3 = *n - i__;
  6555. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (i__, i__ + 1), lda, &a_ref (i__, i__),
  6556. &c__1, &c_b16, &y_ref (i__ + 1, i__), &c__1);
  6557. i__2 = *m - i__ + 1;
  6558. i__3 = i__ - 1;
  6559. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (i__, 1), lda, &a_ref (i__, i__), &c__1,
  6560. &c_b16, &y_ref (1, i__), &c__1);
  6561. i__2 = *n - i__;
  6562. i__3 = i__ - 1;
  6563. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &y_ref (i__ + 1, 1), ldy, &y_ref (1, i__), &c__1,
  6564. &c_b5, &y_ref (i__ + 1, i__), &c__1);
  6565. i__2 = *m - i__ + 1;
  6566. i__3 = i__ - 1;
  6567. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &x_ref (i__, 1), ldx, &a_ref (i__, i__), &c__1,
  6568. &c_b16, &y_ref (1, i__), &c__1);
  6569. i__2 = i__ - 1;
  6570. i__3 = *n - i__;
  6571. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b4, &a_ref (1, i__ + 1), lda, &y_ref (1, i__), &c__1,
  6572. &c_b5, &y_ref (i__ + 1, i__), &c__1);
  6573. i__2 = *n - i__;
  6574. NUMblas_dscal (&i__2, &tauq[i__], &y_ref (i__ + 1, i__), &c__1);
  6575. /* Update A(i,i+1:n) */
  6576. i__2 = *n - i__;
  6577. NUMblas_dgemv ("No transpose", &i__2, &i__, &c_b4, &y_ref (i__ + 1, 1), ldy, &a_ref (i__, 1), lda,
  6578. &c_b5, &a_ref (i__, i__ + 1), lda);
  6579. i__2 = i__ - 1;
  6580. i__3 = *n - i__;
  6581. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b4, &a_ref (1, i__ + 1), lda, &x_ref (i__, 1), ldx,
  6582. &c_b5, &a_ref (i__, i__ + 1), lda);
  6583. /* Generate reflection P(i) to annihilate A(i,i+2:n)
  6584. Computing MIN */
  6585. i__2 = i__ + 2;
  6586. i__3 = *n - i__;
  6587. NUMlapack_dlarfg (&i__3, &a_ref (i__, i__ + 1), &a_ref (i__, MIN (i__2, *n)), lda, &taup[i__]);
  6588. e[i__] = a_ref (i__, i__ + 1);
  6589. a_ref (i__, i__ + 1) = 1.;
  6590. /* Compute X(i+1:m,i) */
  6591. i__2 = *m - i__;
  6592. i__3 = *n - i__;
  6593. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (i__ + 1, i__ + 1), lda, &a_ref (i__,
  6594. i__ + 1), lda, &c_b16, &x_ref (i__ + 1, i__), &c__1);
  6595. i__2 = *n - i__;
  6596. NUMblas_dgemv ("Transpose", &i__2, &i__, &c_b5, &y_ref (i__ + 1, 1), ldy, &a_ref (i__, i__ + 1), lda,
  6597. &c_b16, &x_ref (1, i__), &c__1);
  6598. i__2 = *m - i__;
  6599. NUMblas_dgemv ("No transpose", &i__2, &i__, &c_b4, &a_ref (i__ + 1, 1), lda, &x_ref (1, i__), &c__1,
  6600. &c_b5, &x_ref (i__ + 1, i__), &c__1);
  6601. i__2 = i__ - 1;
  6602. i__3 = *n - i__;
  6603. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (1, i__ + 1), lda, &a_ref (i__, i__ + 1),
  6604. lda, &c_b16, &x_ref (1, i__), &c__1);
  6605. i__2 = *m - i__;
  6606. i__3 = i__ - 1;
  6607. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &x_ref (i__ + 1, 1), ldx, &x_ref (1, i__), &c__1,
  6608. &c_b5, &x_ref (i__ + 1, i__), &c__1);
  6609. i__2 = *m - i__;
  6610. NUMblas_dscal (&i__2, &taup[i__], &x_ref (i__ + 1, i__), &c__1);
  6611. }
  6612. /* L10: */
  6613. }
  6614. } else {
  6615. /* Reduce to lower bidiagonal form */
  6616. i__1 = *nb;
  6617. for (i__ = 1; i__ <= i__1; ++i__) {
  6618. /* Update A(i,i:n) */
  6619. i__2 = *n - i__ + 1;
  6620. i__3 = i__ - 1;
  6621. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &y_ref (i__, 1), ldy, &a_ref (i__, 1), lda, &c_b5,
  6622. &a_ref (i__, i__), lda);
  6623. i__2 = i__ - 1;
  6624. i__3 = *n - i__ + 1;
  6625. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b4, &a_ref (1, i__), lda, &x_ref (i__, 1), ldx, &c_b5,
  6626. &a_ref (i__, i__), lda);
  6627. /* Generate reflection P(i) to annihilate A(i,i+1:n)
  6628. Computing MIN */
  6629. i__2 = i__ + 1;
  6630. i__3 = *n - i__ + 1;
  6631. NUMlapack_dlarfg (&i__3, &a_ref (i__, i__), &a_ref (i__, MIN (i__2, *n)), lda, &taup[i__]);
  6632. d__[i__] = a_ref (i__, i__);
  6633. if (i__ < *m) {
  6634. a_ref (i__, i__) = 1.;
  6635. /* Compute X(i+1:m,i) */
  6636. i__2 = *m - i__;
  6637. i__3 = *n - i__ + 1;
  6638. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (i__ + 1, i__), lda, &a_ref (i__, i__),
  6639. lda, &c_b16, &x_ref (i__ + 1, i__), &c__1);
  6640. i__2 = *n - i__ + 1;
  6641. i__3 = i__ - 1;
  6642. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &y_ref (i__, 1), ldy, &a_ref (i__, i__), lda,
  6643. &c_b16, &x_ref (1, i__), &c__1);
  6644. i__2 = *m - i__;
  6645. i__3 = i__ - 1;
  6646. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &a_ref (i__ + 1, 1), lda, &x_ref (1, i__), &c__1,
  6647. &c_b5, &x_ref (i__ + 1, i__), &c__1);
  6648. i__2 = i__ - 1;
  6649. i__3 = *n - i__ + 1;
  6650. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (1, i__), lda, &a_ref (i__, i__), lda,
  6651. &c_b16, &x_ref (1, i__), &c__1);
  6652. i__2 = *m - i__;
  6653. i__3 = i__ - 1;
  6654. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &x_ref (i__ + 1, 1), ldx, &x_ref (1, i__), &c__1,
  6655. &c_b5, &x_ref (i__ + 1, i__), &c__1);
  6656. i__2 = *m - i__;
  6657. NUMblas_dscal (&i__2, &taup[i__], &x_ref (i__ + 1, i__), &c__1);
  6658. /* Update A(i+1:m,i) */
  6659. i__2 = *m - i__;
  6660. i__3 = i__ - 1;
  6661. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &a_ref (i__ + 1, 1), lda, &y_ref (i__, 1), ldy,
  6662. &c_b5, &a_ref (i__ + 1, i__), &c__1);
  6663. i__2 = *m - i__;
  6664. NUMblas_dgemv ("No transpose", &i__2, &i__, &c_b4, &x_ref (i__ + 1, 1), ldx, &a_ref (1, i__), &c__1,
  6665. &c_b5, &a_ref (i__ + 1, i__), &c__1);
  6666. /* Generate reflection Q(i) to annihilate A(i+2:m,i)
  6667. Computing MIN */
  6668. i__2 = i__ + 2;
  6669. i__3 = *m - i__;
  6670. NUMlapack_dlarfg (&i__3, &a_ref (i__ + 1, i__), &a_ref (MIN (i__2, *m), i__), &c__1,
  6671. &tauq[i__]);
  6672. e[i__] = a_ref (i__ + 1, i__);
  6673. a_ref (i__ + 1, i__) = 1.;
  6674. /* Compute Y(i+1:n,i) */
  6675. i__2 = *m - i__;
  6676. i__3 = *n - i__;
  6677. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (i__ + 1, i__ + 1), lda, &a_ref (i__ + 1,
  6678. i__), &c__1, &c_b16, &y_ref (i__ + 1, i__), &c__1);
  6679. i__2 = *m - i__;
  6680. i__3 = i__ - 1;
  6681. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (i__ + 1, 1), lda, &a_ref (i__ + 1, i__),
  6682. &c__1, &c_b16, &y_ref (1, i__), &c__1);
  6683. i__2 = *n - i__;
  6684. i__3 = i__ - 1;
  6685. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &y_ref (i__ + 1, 1), ldy, &y_ref (1, i__), &c__1,
  6686. &c_b5, &y_ref (i__ + 1, i__), &c__1);
  6687. i__2 = *m - i__;
  6688. NUMblas_dgemv ("Transpose", &i__2, &i__, &c_b5, &x_ref (i__ + 1, 1), ldx, &a_ref (i__ + 1, i__),
  6689. &c__1, &c_b16, &y_ref (1, i__), &c__1);
  6690. i__2 = *n - i__;
  6691. NUMblas_dgemv ("Transpose", &i__, &i__2, &c_b4, &a_ref (1, i__ + 1), lda, &y_ref (1, i__), &c__1,
  6692. &c_b5, &y_ref (i__ + 1, i__), &c__1);
  6693. i__2 = *n - i__;
  6694. NUMblas_dscal (&i__2, &tauq[i__], &y_ref (i__ + 1, i__), &c__1);
  6695. }
  6696. /* L20: */
  6697. }
  6698. }
  6699. return 0;
  6700. } /* NUMlapack_dlabrd */
  6701. #undef y_ref
  6702. #undef x_ref
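/* Illustrative usage sketch (not part of the original translation): reduce
   the leading nb rows and columns of a column-major m-by-n matrix toward
   bidiagonal form, returning the block factors X and Y that a caller would
   use to update the trailing submatrix. All values are examples. */
#if 0
static void example_dlabrd (void) {
	double a[12] = { 4., 1., 0., 0.,   1., 3., 1., 0.,   0., 1., 2., 1. };   /* 4-by-3, column-major */
	double d[2], e[2], tauq[2], taup[2], x[8], y[6];   /* x: 4-by-2, y: 3-by-2 */
	integer m = 4, n = 3, nb = 2, lda = 4, ldx = 4, ldy = 3;
	NUMlapack_dlabrd (&m, &n, &nb, a, &lda, d, e, tauq, taup, x, &ldx, y, &ldy);
	/* d[] and e[] now hold the first nb diagonal and superdiagonal elements;
	   the reflectors Q(i) and P(i) overwrite the visited part of a[]. */
}
#endif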
  6703. #define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
  6704. int NUMlapack_dlacpy (const char *uplo, integer *m, integer *n, double *a, integer *lda, double *b, integer *ldb) {
  6705. /* System generated locals */
  6706. integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
  6707. /* Local variables */
  6708. static integer i__, j;
  6709. a_dim1 = *lda;
  6710. a_offset = 1 + a_dim1 * 1;
  6711. a -= a_offset;
  6712. b_dim1 = *ldb;
  6713. b_offset = 1 + b_dim1 * 1;
  6714. b -= b_offset;
  6715. /* Function Body */
  6716. if (lsame_ (uplo, "U")) {
  6717. i__1 = *n;
  6718. for (j = 1; j <= i__1; ++j) {
  6719. i__2 = MIN (j, *m);
  6720. for (i__ = 1; i__ <= i__2; ++i__) {
  6721. b_ref (i__, j) = a_ref (i__, j);
  6722. /* L10: */
  6723. }
  6724. /* L20: */
  6725. }
  6726. } else if (lsame_ (uplo, "L")) {
  6727. i__1 = *n;
  6728. for (j = 1; j <= i__1; ++j) {
  6729. i__2 = *m;
  6730. for (i__ = j; i__ <= i__2; ++i__) {
  6731. b_ref (i__, j) = a_ref (i__, j);
  6732. /* L30: */
  6733. }
  6734. /* L40: */
  6735. }
  6736. } else {
  6737. i__1 = *n;
  6738. for (j = 1; j <= i__1; ++j) {
  6739. i__2 = *m;
  6740. for (i__ = 1; i__ <= i__2; ++i__) {
  6741. b_ref (i__, j) = a_ref (i__, j);
  6742. /* L50: */
  6743. }
  6744. /* L60: */
  6745. }
  6746. }
  6747. return 0;
  6748. } /* NUMlapack_dlacpy */
  6749. #undef b_ref
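/* Illustrative usage sketch (not part of the original translation): copy a
   full column-major matrix; "U" or "L" would copy only the upper or lower
   triangle instead. */
#if 0
static void example_dlacpy (void) {
	double a[4] = { 1., 2., 3., 4. };   /* 2-by-2, column-major: [ 1 3; 2 4 ] */
	double b[4];
	integer m = 2, n = 2, lda = 2, ldb = 2;
	NUMlapack_dlacpy ("Full", &m, &n, a, &lda, b, &ldb);
	/* b[] is now an element-wise copy of a[] */
}
#endif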
  6750. int NUMlapack_dladiv (double *a, double *b, double *c, double *d, double *p, double *q) {
  6751. static double e, f;
  6752. if (fabs (*d) < fabs (*c)) {
  6753. e = *d / *c;
  6754. f = *c + *d * e;
  6755. *p = (*a + *b * e) / f;
  6756. *q = (*b - *a * e) / f;
  6757. } else {
  6758. e = *c / *d;
  6759. f = *d + *c * e;
  6760. *p = (*b + *a * e) / f;
  6761. *q = (- (*a) + *b * e) / f;
  6762. }
  6763. return 0;
  6764. } /* NUMlapack_dladiv */
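/* Illustrative sketch (not part of the original translation): dladiv performs
   the complex division p + i*q = (a + i*b) / (c + i*d) in real arithmetic,
   dividing by the larger of |c| and |d| first so that intermediate results
   cannot overflow needlessly (Smith's algorithm). */
#if 0
static void example_dladiv (void) {
	double a = 1., b = 2., c = 3., d = 4., p, q;
	NUMlapack_dladiv (&a, &b, &c, &d, &p, &q);
	/* (1 + 2i) / (3 + 4i) = (11 + 2i) / 25, so p == 0.44 and q == 0.08 */
}
#endif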
  6765. int NUMlapack_dlae2 (double *a, double *b, double *c__, double *rt1, double *rt2) {
  6766. /* System generated locals */
  6767. double d__1;
  6768. /* Local variables */
  6769. static double acmn, acmx, ab, df, tb, sm, rt, adf;
  6770. sm = *a + *c__;
  6771. df = *a - *c__;
  6772. adf = fabs (df);
  6773. tb = *b + *b;
  6774. ab = fabs (tb);
  6775. // djmw 20110721 changed abs(*a) to fabs(*a)
  6776. if (fabs (*a) > fabs (*c__)) {
  6777. acmx = *a;
  6778. acmn = *c__;
  6779. } else {
  6780. acmx = *c__;
  6781. acmn = *a;
  6782. }
  6783. if (adf > ab) {
  6784. /* Computing 2nd power */
  6785. d__1 = ab / adf;
  6786. rt = adf * sqrt (d__1 * d__1 + 1.);
  6787. } else if (adf < ab) {
  6788. /* Computing 2nd power */
  6789. d__1 = adf / ab;
  6790. rt = ab * sqrt (d__1 * d__1 + 1.);
  6791. } else {
  6792. /* Includes case AB=ADF=0 */
  6793. rt = ab * sqrt (2.);
  6794. }
  6795. if (sm < 0.) {
  6796. *rt1 = (sm - rt) * .5;
  6797. /* Order of execution important. To get fully accurate smaller
  6798. eigenvalue, next line needs to be executed in higher precision. */
  6799. *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
  6800. } else if (sm > 0.) {
  6801. *rt1 = (sm + rt) * .5;
  6802. /* Order of execution important. To get fully accurate smaller
  6803. eigenvalue, next line needs to be executed in higher precision. */
  6804. *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
  6805. } else {
  6806. /* Includes case RT1 = RT2 = 0 */
  6807. *rt1 = rt * .5;
  6808. *rt2 = rt * -.5;
  6809. }
  6810. return 0;
  6811. } /* NUMlapack_dlae2 */
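/* Illustrative sketch (not part of the original translation): dlae2 returns
   the eigenvalues of the symmetric 2-by-2 matrix [ a b; b c ], with RT1 the
   eigenvalue of larger absolute value. */
#if 0
static void example_dlae2 (void) {
	double a = 2., b = 1., c = 2., rt1, rt2;
	NUMlapack_dlae2 (&a, &b, &c, &rt1, &rt2);
	/* [ 2 1; 1 2 ] has eigenvalues 3 and 1: rt1 == 3., rt2 == 1. */
}
#endif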
  6812. int NUMlapack_dlaev2 (double *a, double *b, double *c__, double *rt1, double *rt2, double *cs1, double *sn1) {
  6813. /* System generated locals */
  6814. double d__1;
  6815. /* Local variables */
  6816. static double acmn, acmx, ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
  6817. static integer sgn1, sgn2;
  6818. sm = *a + *c__;
  6819. df = *a - *c__;
  6820. adf = fabs (df);
  6821. tb = *b + *b;
  6822. ab = fabs (tb);
  6823. if (fabs (*a) > fabs (*c__)) {
  6824. acmx = *a;
  6825. acmn = *c__;
  6826. } else {
  6827. acmx = *c__;
  6828. acmn = *a;
  6829. }
  6830. if (adf > ab) {
  6831. /* Computing 2nd power */
  6832. d__1 = ab / adf;
  6833. rt = adf * sqrt (d__1 * d__1 + 1.);
  6834. } else if (adf < ab) {
  6835. /* Computing 2nd power */
  6836. d__1 = adf / ab;
  6837. rt = ab * sqrt (d__1 * d__1 + 1.);
  6838. } else {
  6839. /* Includes case AB=ADF=0 */
  6840. rt = ab * sqrt (2.);
  6841. }
  6842. if (sm < 0.) {
  6843. *rt1 = (sm - rt) * .5;
  6844. sgn1 = -1;
  6845. /* Order of execution important. To get fully accurate smaller
  6846. eigenvalue, next line needs to be executed in higher precision. */
  6847. *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
  6848. } else if (sm > 0.) {
  6849. *rt1 = (sm + rt) * .5;
  6850. sgn1 = 1;
  6851. /* Order of execution important. To get fully accurate smaller
  6852. eigenvalue, next line needs to be executed in higher precision. */
  6853. *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
  6854. } else {
  6855. /* Includes case RT1 = RT2 = 0 */
  6856. *rt1 = rt * .5;
  6857. *rt2 = rt * -.5;
  6858. sgn1 = 1;
  6859. }
  6860. /* Compute the eigenvector */
  6861. if (df >= 0.) {
  6862. cs = df + rt;
  6863. sgn2 = 1;
  6864. } else {
  6865. cs = df - rt;
  6866. sgn2 = -1;
  6867. }
  6868. acs = fabs (cs);
  6869. if (acs > ab) {
  6870. ct = -tb / cs;
  6871. *sn1 = 1. / sqrt (ct * ct + 1.);
  6872. *cs1 = ct * *sn1;
  6873. } else {
  6874. if (ab == 0.) {
  6875. *cs1 = 1.;
  6876. *sn1 = 0.;
  6877. } else {
  6878. tn = -cs / tb;
  6879. *cs1 = 1. / sqrt (tn * tn + 1.);
  6880. *sn1 = tn * *cs1;
  6881. }
  6882. }
  6883. if (sgn1 == sgn2) {
  6884. tn = *cs1;
  6885. *cs1 = - (*sn1);
  6886. *sn1 = tn;
  6887. }
  6888. return 0;
  6889. } /* NUMlapack_dlaev2 */
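/* Illustrative sketch (not part of the original translation): dlaev2 is
   dlae2 plus the rotation (CS1, SN1) that diagonalizes the matrix; the unit
   eigenvector for RT1 is (CS1, SN1). */
#if 0
static void example_dlaev2 (void) {
	double a = 2., b = 1., c = 2., rt1, rt2, cs1, sn1;
	NUMlapack_dlaev2 (&a, &b, &c, &rt1, &rt2, &cs1, &sn1);
	/* rt1 == 3., rt2 == 1., cs1 == sn1 == 1/sqrt(2): the eigenvector
	   for the eigenvalue 3 is (1, 1)/sqrt(2). */
}
#endif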
  6890. int NUMlapack_dlags2 (integer *upper, double *a1, double *a2, double *a3, double *b1, double *b2, double *b3,
  6891. double *csu, double *snu, double *csv, double *snv, double *csq, double *snq) {
  6892. /* System generated locals */
  6893. double d__1;
  6894. /* Local variables */
  6895. static double aua11, aua12, aua21, aua22, avb11, avb12, avb21, avb22;
  6896. double ua11r, ua22r, vb11r, vb22r, a, b, c__, d__, r__, s1, s2;
  6897. static double ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22, csl, csr, snl, snr;
  6898. if (*upper) {
/* Input matrices A and B are upper triangular matrices

   Form matrix C = A*adj(B) = ( a b )
                              ( 0 d ) */
  6901. a = *a1 * *b3;
  6902. d__ = *a3 * *b1;
  6903. b = *a2 * *b1 - *a1 * *b2;
/* The SVD of real 2-by-2 triangular C

   ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 )
   ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T ) */
  6907. NUMlapack_dlasv2 (&a, &b, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
  6908. if (fabs (csl) >= fabs (snl) || fabs (csr) >= fabs (snr)) {
  6909. /* Compute the (1,1) and (1,2) elements of U'*A and V'*B, and
  6910. (1,2) element of |U|'*|A| and |V|'*|B|. */
  6911. ua11r = csl * *a1;
  6912. ua12 = csl * *a2 + snl * *a3;
  6913. vb11r = csr * *b1;
  6914. vb12 = csr * *b2 + snr * *b3;
  6915. aua12 = fabs (csl) * fabs (*a2) + fabs (snl) * fabs (*a3);
  6916. avb12 = fabs (csr) * fabs (*b2) + fabs (snr) * fabs (*b3);
  6917. /* zero (1,2) elements of U'*A and V'*B */
  6918. if (fabs (ua11r) + fabs (ua12) != 0.) {
  6919. if (aua12 / (fabs (ua11r) + fabs (ua12)) <= avb12 / (fabs (vb11r) + fabs (vb12))) {
  6920. d__1 = -ua11r;
  6921. NUMlapack_dlartg (&d__1, &ua12, csq, snq, &r__);
  6922. } else {
  6923. d__1 = -vb11r;
  6924. NUMlapack_dlartg (&d__1, &vb12, csq, snq, &r__);
  6925. }
  6926. } else {
  6927. d__1 = -vb11r;
  6928. NUMlapack_dlartg (&d__1, &vb12, csq, snq, &r__);
  6929. }
  6930. *csu = csl;
  6931. *snu = -snl;
  6932. *csv = csr;
  6933. *snv = -snr;
  6934. } else {
  6935. /* Compute the (2,1) and (2,2) elements of U'*A and V'*B, and
  6936. (2,2) element of |U|'*|A| and |V|'*|B|. */
  6937. ua21 = -snl * *a1;
  6938. ua22 = -snl * *a2 + csl * *a3;
  6939. vb21 = -snr * *b1;
  6940. vb22 = -snr * *b2 + csr * *b3;
  6941. aua22 = fabs (snl) * fabs (*a2) + fabs (csl) * fabs (*a3);
  6942. avb22 = fabs (snr) * fabs (*b2) + fabs (csr) * fabs (*b3);
  6943. /* zero (2,2) elements of U'*A and V'*B, and then swap. */
  6944. if (fabs (ua21) + fabs (ua22) != 0.) {
  6945. if (aua22 / (fabs (ua21) + fabs (ua22)) <= avb22 / (fabs (vb21) + fabs (vb22))) {
  6946. d__1 = -ua21;
  6947. NUMlapack_dlartg (&d__1, &ua22, csq, snq, &r__);
  6948. } else {
  6949. d__1 = -vb21;
  6950. NUMlapack_dlartg (&d__1, &vb22, csq, snq, &r__);
  6951. }
  6952. } else {
  6953. d__1 = -vb21;
  6954. NUMlapack_dlartg (&d__1, &vb22, csq, snq, &r__);
  6955. }
  6956. *csu = snl;
  6957. *snu = csl;
  6958. *csv = snr;
  6959. *snv = csr;
  6960. }
  6961. } else {
/* Input matrices A and B are lower triangular matrices

   Form matrix C = A*adj(B) = ( a 0 )
                              ( c d ) */
  6964. a = *a1 * *b3;
  6965. d__ = *a3 * *b1;
  6966. c__ = *a2 * *b3 - *a3 * *b2;
/* The SVD of real 2-by-2 triangular C

   ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 )
   ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T ) */
  6970. NUMlapack_dlasv2 (&a, &c__, &d__, &s1, &s2, &snr, &csr, &snl, &csl);
  6971. if (fabs (csr) >= fabs (snr) || fabs (csl) >= fabs (snl)) {
  6972. /* Compute the (2,1) and (2,2) elements of U'*A and V'*B, and
  6973. (2,1) element of |U|'*|A| and |V|'*|B|. */
  6974. ua21 = -snr * *a1 + csr * *a2;
  6975. ua22r = csr * *a3;
  6976. vb21 = -snl * *b1 + csl * *b2;
  6977. vb22r = csl * *b3;
  6978. aua21 = fabs (snr) * fabs (*a1) + fabs (csr) * fabs (*a2);
  6979. avb21 = fabs (snl) * fabs (*b1) + fabs (csl) * fabs (*b2);
  6980. /* zero (2,1) elements of U'*A and V'*B. */
  6981. if (fabs (ua21) + fabs (ua22r) != 0.) {
  6982. if (aua21 / (fabs (ua21) + fabs (ua22r)) <= avb21 / (fabs (vb21) + fabs (vb22r))) {
  6983. NUMlapack_dlartg (&ua22r, &ua21, csq, snq, &r__);
  6984. } else {
  6985. NUMlapack_dlartg (&vb22r, &vb21, csq, snq, &r__);
  6986. }
  6987. } else {
  6988. NUMlapack_dlartg (&vb22r, &vb21, csq, snq, &r__);
  6989. }
  6990. *csu = csr;
  6991. *snu = -snr;
  6992. *csv = csl;
  6993. *snv = -snl;
  6994. } else {
  6995. /* Compute the (1,1) and (1,2) elements of U'*A and V'*B, and
  6996. (1,1) element of |U|'*|A| and |V|'*|B|. */
  6997. ua11 = csr * *a1 + snr * *a2;
  6998. ua12 = snr * *a3;
  6999. vb11 = csl * *b1 + snl * *b2;
  7000. vb12 = snl * *b3;
  7001. aua11 = fabs (csr) * fabs (*a1) + fabs (snr) * fabs (*a2);
  7002. avb11 = fabs (csl) * fabs (*b1) + fabs (snl) * fabs (*b2);
  7003. /* zero (1,1) elements of U'*A and V'*B, and then swap. */
  7004. if (fabs (ua11) + fabs (ua12) != 0.) {
  7005. if (aua11 / (fabs (ua11) + fabs (ua12)) <= avb11 / (fabs (vb11) + fabs (vb12))) {
  7006. NUMlapack_dlartg (&ua12, &ua11, csq, snq, &r__);
  7007. } else {
  7008. NUMlapack_dlartg (&vb12, &vb11, csq, snq, &r__);
  7009. }
  7010. } else {
  7011. NUMlapack_dlartg (&vb12, &vb11, csq, snq, &r__);
  7012. }
  7013. *csu = snr;
  7014. *snu = csr;
  7015. *csv = snl;
  7016. *snv = csl;
  7017. }
  7018. }
  7019. return 0;
  7020. } /* NUMlapack_dlags2 */
  7021. int NUMlapack_dlahqr (int *wantt, int *wantz, integer *n, integer *ilo, integer *ihi, double *h__, integer *ldh,
  7022. double *wr, double *wi, integer *iloz, integer *ihiz, double *z__, integer *ldz, integer *info) {
  7023. /* Table of constant values */
  7024. static integer c__1 = 1;
  7025. /* System generated locals */
  7026. integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
  7027. double d__1, d__2;
  7028. /* Local variables */
  7029. static double h43h34, disc, unfl, ovfl;
  7030. static double work[1];
  7031. static integer i__, j, k, l, m;
  7032. static double s, v[3];
  7033. static integer i1, i2;
  7034. static double t1, t2, t3, v1, v2, v3;
  7035. static double h00, h10, h11, h12, h21, h22, h33, h44;
  7036. static integer nh;
  7037. static double cs;
  7038. static integer nr;
  7039. static double sn;
  7040. static integer nz;
  7041. static double smlnum, ave, h33s, h44s;
  7042. static integer itn, its;
  7043. static double ulp, sum, tst1;
  7044. #define h___ref(a_1,a_2) h__[(a_2)*h_dim1 + a_1]
  7045. #define z___ref(a_1,a_2) z__[(a_2)*z_dim1 + a_1]
  7046. h_dim1 = *ldh;
  7047. h_offset = 1 + h_dim1 * 1;
  7048. h__ -= h_offset;
  7049. --wr;
  7050. --wi;
  7051. z_dim1 = *ldz;
  7052. z_offset = 1 + z_dim1 * 1;
  7053. z__ -= z_offset;
  7054. /* Function Body */
  7055. *info = 0;
  7056. /* Quick return if possible */
  7057. if (*n == 0) {
  7058. return 0;
  7059. }
  7060. if (*ilo == *ihi) {
  7061. wr[*ilo] = h___ref (*ilo, *ilo);
  7062. wi[*ilo] = 0.;
  7063. return 0;
  7064. }
  7065. nh = *ihi - *ilo + 1;
  7066. nz = *ihiz - *iloz + 1;
  7067. /* Set machine-dependent constants for the stopping criterion. If norm(H)
  7068. <= sqrt(OVFL), overflow should not occur. */
  7069. unfl = NUMblas_dlamch ("Safe minimum");
  7070. ovfl = 1. / unfl;
  7071. NUMlapack_dlabad (&unfl, &ovfl);
  7072. ulp = NUMblas_dlamch ("Precision");
  7073. smlnum = unfl * (nh / ulp);
  7074. /* I1 and I2 are the indices of the first row and last column of H to
  7075. which transformations should be applied. If eigenvalues only are being
  7076. computed, I1 and I2 are set inside the main loop. */
  7077. if (*wantt) {
  7078. i1 = 1;
  7079. i2 = *n;
  7080. }
  7081. /* ITN is the total number of QR iterations allowed. */
  7082. itn = nh * 30;
  7083. /* The main loop begins here. I is the loop index and decreases from IHI
  7084. to ILO in steps of 1 or 2. Each iteration of the loop works with the
  7085. active submatrix in rows and columns L to I. Eigenvalues I+1 to IHI
  7086. have already converged. Either L = ILO or H(L,L-1) is negligible so
  7087. that the matrix splits. */
  7088. i__ = *ihi;
  7089. L10:
  7090. l = *ilo;
  7091. if (i__ < *ilo) {
  7092. goto L150;
  7093. }
  7094. /* Perform QR iterations on rows and columns ILO to I until a submatrix
  7095. of order 1 or 2 splits off at the bottom because a subdiagonal element
  7096. has become negligible. */
  7097. i__1 = itn;
  7098. for (its = 0; its <= i__1; ++its) {
  7099. /* Look for a single small subdiagonal element. */
  7100. i__2 = l + 1;
  7101. for (k = i__; k >= i__2; --k) {
  7102. tst1 = (d__1 = h___ref (k - 1, k - 1), fabs (d__1)) + (d__2 = h___ref (k, k), fabs (d__2));
  7103. if (tst1 == 0.) {
  7104. i__3 = i__ - l + 1;
  7105. tst1 = NUMlapack_dlanhs ("1", &i__3, &h___ref (l, l), ldh, work);
  7106. }
  7107. /* Computing MAX */
  7108. d__2 = ulp * tst1;
  7109. if ( (d__1 = h___ref (k, k - 1), fabs (d__1)) <= MAX (d__2, smlnum)) {
  7110. goto L30;
  7111. }
  7112. /* L20: */
  7113. }
  7114. L30:
  7115. l = k;
  7116. if (l > *ilo) {
  7117. /* H(L,L-1) is negligible */
  7118. h___ref (l, l - 1) = 0.;
  7119. }
  7120. /* Exit from loop if a submatrix of order 1 or 2 has split off. */
  7121. if (l >= i__ - 1) {
  7122. goto L140;
  7123. }
  7124. /* Now the active submatrix is in rows and columns L to I. If
  7125. eigenvalues only are being computed, only the active submatrix
  7126. need be transformed. */
  7127. if (! (*wantt)) {
  7128. i1 = l;
  7129. i2 = i__;
  7130. }
  7131. if (its == 10 || its == 20) {
  7132. /* Exceptional shift. */
  7133. s = (d__1 = h___ref (i__, i__ - 1), fabs (d__1)) + (d__2 =
  7134. h___ref (i__ - 1, i__ - 2), fabs (d__2));
  7135. h44 = s * .75 + h___ref (i__, i__);
  7136. h33 = h44;
  7137. h43h34 = s * -.4375 * s;
  7138. } else {
  7139. /* Prepare to use Francis' double shift (i.e. 2nd degree
  7140. generalized Rayleigh quotient) */
  7141. h44 = h___ref (i__, i__);
  7142. h33 = h___ref (i__ - 1, i__ - 1);
  7143. h43h34 = h___ref (i__, i__ - 1) * h___ref (i__ - 1, i__);
  7144. s = h___ref (i__ - 1, i__ - 2) * h___ref (i__ - 1, i__ - 2);
  7145. disc = (h33 - h44) * .5;
  7146. disc = disc * disc + h43h34;
  7147. if (disc > 0.) {
  7148. /* Real roots: use Wilkinson's shift twice */
  7149. disc = sqrt (disc);
  7150. ave = (h33 + h44) * .5;
  7151. if (fabs (h33) - fabs (h44) > 0.) {
  7152. h33 = h33 * h44 - h43h34;
  7153. h44 = h33 / (d_sign (&disc, &ave) + ave);
  7154. } else {
  7155. h44 = d_sign (&disc, &ave) + ave;
  7156. }
  7157. h33 = h44;
  7158. h43h34 = 0.;
  7159. }
  7160. }
  7161. /* Look for two consecutive small subdiagonal elements. */
  7162. i__2 = l;
  7163. for (m = i__ - 2; m >= i__2; --m) {
  7164. /* Determine the effect of starting the double-shift QR iteration
  7165. at row M, and see if this would make H(M,M-1) negligible. */
  7166. h11 = h___ref (m, m);
  7167. h22 = h___ref (m + 1, m + 1);
  7168. h21 = h___ref (m + 1, m);
  7169. h12 = h___ref (m, m + 1);
  7170. h44s = h44 - h11;
  7171. h33s = h33 - h11;
  7172. v1 = (h33s * h44s - h43h34) / h21 + h12;
  7173. v2 = h22 - h11 - h33s - h44s;
  7174. v3 = h___ref (m + 2, m + 1);
  7175. s = fabs (v1) + fabs (v2) + fabs (v3);
  7176. v1 /= s;
  7177. v2 /= s;
  7178. v3 /= s;
  7179. v[0] = v1;
  7180. v[1] = v2;
  7181. v[2] = v3;
  7182. if (m == l) {
  7183. goto L50;
  7184. }
  7185. h00 = h___ref (m - 1, m - 1);
  7186. h10 = h___ref (m, m - 1);
  7187. tst1 = fabs (v1) * (fabs (h00) + fabs (h11) + fabs (h22));
  7188. if (fabs (h10) * (fabs (v2) + fabs (v3)) <= ulp * tst1) {
  7189. goto L50;
  7190. }
  7191. /* L40: */
  7192. }
  7193. L50:
  7194. /* Double-shift QR step */
  7195. i__2 = i__ - 1;
  7196. for (k = m; k <= i__2; ++k) {
  7197. /* The first iteration of this loop determines a reflection G
  7198. from the vector V and applies it from left and right to H,
  7199. thus creating a nonzero bulge below the subdiagonal.
  7200. Each subsequent iteration determines a reflection G to restore
  7201. the Hessenberg form in the (K-1)th column, and thus chases the
  7202. bulge one step toward the bottom of the active submatrix. NR
  7203. is the order of G.
  7204. Computing MIN */
  7205. i__3 = 3, i__4 = i__ - k + 1;
  7206. nr = MIN (i__3, i__4);
  7207. if (k > m) {
  7208. NUMblas_dcopy (&nr, &h___ref (k, k - 1), &c__1, v, &c__1);
  7209. }
  7210. NUMlapack_dlarfg (&nr, v, &v[1], &c__1, &t1);
  7211. if (k > m) {
  7212. h___ref (k, k - 1) = v[0];
  7213. h___ref (k + 1, k - 1) = 0.;
  7214. if (k < i__ - 1) {
  7215. h___ref (k + 2, k - 1) = 0.;
  7216. }
  7217. } else if (m > l) {
  7218. h___ref (k, k - 1) = -h___ref (k, k - 1);
  7219. }
  7220. v2 = v[1];
  7221. t2 = t1 * v2;
  7222. if (nr == 3) {
  7223. v3 = v[2];
  7224. t3 = t1 * v3;
  7225. /* Apply G from the left to transform the rows of the matrix
  7226. in columns K to I2. */
  7227. i__3 = i2;
  7228. for (j = k; j <= i__3; ++j) {
  7229. sum = h___ref (k, j) + v2 * h___ref (k + 1, j) + v3 * h___ref (k + 2, j);
  7230. h___ref (k, j) = h___ref (k, j) - sum * t1;
  7231. h___ref (k + 1, j) = h___ref (k + 1, j) - sum * t2;
  7232. h___ref (k + 2, j) = h___ref (k + 2, j) - sum * t3;
  7233. /* L60: */
  7234. }
  7235. /* Apply G from the right to transform the columns of the
  7236. matrix in rows I1 to MIN (K+3,I).
  7237. Computing MIN */
  7238. i__4 = k + 3;
  7239. i__3 = MIN (i__4, i__);
  7240. for (j = i1; j <= i__3; ++j) {
  7241. sum = h___ref (j, k) + v2 * h___ref (j, k + 1) + v3 * h___ref (j, k + 2);
  7242. h___ref (j, k) = h___ref (j, k) - sum * t1;
  7243. h___ref (j, k + 1) = h___ref (j, k + 1) - sum * t2;
  7244. h___ref (j, k + 2) = h___ref (j, k + 2) - sum * t3;
  7245. /* L70: */
  7246. }
  7247. if (*wantz) {
  7248. /* Accumulate transformations in the matrix Z */
  7249. i__3 = *ihiz;
  7250. for (j = *iloz; j <= i__3; ++j) {
  7251. sum = z___ref (j, k) + v2 * z___ref (j, k + 1) + v3 * z___ref (j, k + 2);
  7252. z___ref (j, k) = z___ref (j, k) - sum * t1;
  7253. z___ref (j, k + 1) = z___ref (j, k + 1) - sum * t2;
  7254. z___ref (j, k + 2) = z___ref (j, k + 2) - sum * t3;
  7255. /* L80: */
  7256. }
  7257. }
  7258. } else if (nr == 2) {
  7259. /* Apply G from the left to transform the rows of the matrix
  7260. in columns K to I2. */
  7261. i__3 = i2;
  7262. for (j = k; j <= i__3; ++j) {
  7263. sum = h___ref (k, j) + v2 * h___ref (k + 1, j);
  7264. h___ref (k, j) = h___ref (k, j) - sum * t1;
  7265. h___ref (k + 1, j) = h___ref (k + 1, j) - sum * t2;
  7266. /* L90: */
  7267. }
  7268. /* Apply G from the right to transform the columns of the
  7269. matrix in rows I1 to MIN (K+3,I). */
  7270. i__3 = i__;
  7271. for (j = i1; j <= i__3; ++j) {
  7272. sum = h___ref (j, k) + v2 * h___ref (j, k + 1);
  7273. h___ref (j, k) = h___ref (j, k) - sum * t1;
  7274. h___ref (j, k + 1) = h___ref (j, k + 1) - sum * t2;
  7275. /* L100: */
  7276. }
  7277. if (*wantz) {
  7278. /* Accumulate transformations in the matrix Z */
  7279. i__3 = *ihiz;
  7280. for (j = *iloz; j <= i__3; ++j) {
  7281. sum = z___ref (j, k) + v2 * z___ref (j, k + 1);
  7282. z___ref (j, k) = z___ref (j, k) - sum * t1;
  7283. z___ref (j, k + 1) = z___ref (j, k + 1) - sum * t2;
  7284. /* L110: */
  7285. }
  7286. }
  7287. }
  7288. /* L120: */
  7289. }
  7290. /* L130: */
  7291. }
  7292. /* Failure to converge in remaining number of iterations */
  7293. *info = i__;
  7294. return 0;
  7295. L140:
  7296. if (l == i__) {
  7297. /* H(I,I-1) is negligible: one eigenvalue has converged. */
  7298. wr[i__] = h___ref (i__, i__);
  7299. wi[i__] = 0.;
  7300. } else if (l == i__ - 1) {
  7301. /* H(I-1,I-2) is negligible: a pair of eigenvalues have converged.
  7302. Transform the 2-by-2 submatrix to standard Schur form, and compute
  7303. and store the eigenvalues. */
  7304. NUMlapack_dlanv2 (&h___ref (i__ - 1, i__ - 1), &h___ref (i__ - 1, i__), &h___ref (i__, i__ - 1),
  7305. &h___ref (i__, i__), &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs, &sn);
  7306. if (*wantt) {
  7307. /* Apply the transformation to the rest of H. */
  7308. if (i2 > i__) {
  7309. i__1 = i2 - i__;
  7310. NUMblas_drot (&i__1, &h___ref (i__ - 1, i__ + 1), ldh, &h___ref (i__, i__ + 1), ldh, &cs,
  7311. &sn);
  7312. }
  7313. i__1 = i__ - i1 - 1;
  7314. NUMblas_drot (&i__1, &h___ref (i1, i__ - 1), &c__1, &h___ref (i1, i__), &c__1, &cs, &sn);
  7315. }
  7316. if (*wantz) {
  7317. /* Apply the transformation to Z. */
  7318. NUMblas_drot (&nz, &z___ref (*iloz, i__ - 1), &c__1, &z___ref (*iloz, i__), &c__1, &cs, &sn);
  7319. }
  7320. }
  7321. /* Decrement number of remaining iterations, and return to start of the
  7322. main loop with new value of I. */
  7323. itn -= its;
  7324. i__ = l - 1;
  7325. goto L10;
  7326. L150:
  7327. return 0;
  7328. } /* NUMlapack_dlahqr */
  7329. #undef z___ref
  7330. #undef h___ref
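/* Illustrative sketch (not part of the original translation): use the
   double-shift QR routine directly on a small Hessenberg matrix. The 2-by-2
   rotation matrix [ 0 -1; 1 0 ] has the eigenvalue pair +i and -i. */
#if 0
static void example_dlahqr (void) {
	double h[4] = { 0., 1., -1., 0. };   /* column-major [ 0 -1; 1 0 ] */
	double wr[2], wi[2], z[1];
	integer n = 2, ilo = 1, ihi = 2, ldh = 2, ldz = 1, info;
	int wantt = FALSE, wantz = FALSE;
	NUMlapack_dlahqr (&wantt, &wantz, &n, &ilo, &ihi, h, &ldh, wr, wi,
		&ilo, &ihi, z, &ldz, &info);
	/* info == 0; wr[0] == wr[1] == 0.; wi[0] == 1., wi[1] == -1. */
}
#endif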
  7331. int NUMlapack_dlahrd (integer *n, integer *k, integer *nb, double *a, integer *lda, double *tau, double *t, integer *ldt,
  7332. double *y, integer *ldy) {
  7333. /* Table of constant values */
  7334. static double c_b4 = -1.;
  7335. static double c_b5 = 1.;
  7336. static integer c__1 = 1;
  7337. static double c_b38 = 0.;
  7338. /* System generated locals */
  7339. integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, i__3;
  7340. double d__1;
  7341. /* Local variables */
  7342. static integer i__;
  7343. static double ei;
  7344. #define t_ref(a_1,a_2) t[(a_2)*t_dim1 + a_1]
  7345. #define y_ref(a_1,a_2) y[(a_2)*y_dim1 + a_1]
  7346. --tau;
  7347. a_dim1 = *lda;
  7348. a_offset = 1 + a_dim1 * 1;
  7349. a -= a_offset;
  7350. t_dim1 = *ldt;
  7351. t_offset = 1 + t_dim1 * 1;
  7352. t -= t_offset;
  7353. y_dim1 = *ldy;
  7354. y_offset = 1 + y_dim1 * 1;
  7355. y -= y_offset;
  7356. /* Function Body */
  7357. if (*n <= 1) {
  7358. return 0;
  7359. }
  7360. i__1 = *nb;
  7361. for (i__ = 1; i__ <= i__1; ++i__) {
  7362. if (i__ > 1) {
  7363. /* Update A(1:n,i)
  7364. Compute i-th column of A - Y * V' */
  7365. i__2 = i__ - 1;
  7366. NUMblas_dgemv ("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &a_ref (*k + i__ - 1, 1), lda,
  7367. &c_b5, &a_ref (1, i__), &c__1);
/* Apply I - V * T' * V' to this column (call it b) from the
   left, using the last column of T as workspace

   Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows)
            ( V2 )             ( b2 )

   where V1 is unit lower triangular

   w := V1' * b1 */
  7373. i__2 = i__ - 1;
  7374. NUMblas_dcopy (&i__2, &a_ref (*k + 1, i__), &c__1, &t_ref (1, *nb), &c__1);
  7375. i__2 = i__ - 1;
  7376. NUMblas_dtrmv ("Lower", "Transpose", "Unit", &i__2, &a_ref (*k + 1, 1), lda, &t_ref (1, *nb),
  7377. &c__1);
  7378. /* w := w + V2'*b2 */
  7379. i__2 = *n - *k - i__ + 1;
  7380. i__3 = i__ - 1;
  7381. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (*k + i__, 1), lda, &a_ref (*k + i__,
  7382. i__), &c__1, &c_b5, &t_ref (1, *nb), &c__1);
  7383. /* w := T'*w */
  7384. i__2 = i__ - 1;
  7385. NUMblas_dtrmv ("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt, &t_ref (1, *nb),
  7386. &c__1);
  7387. /* b2 := b2 - V2*w */
  7388. i__2 = *n - *k - i__ + 1;
  7389. i__3 = i__ - 1;
  7390. NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b4, &a_ref (*k + i__, 1), lda, &t_ref (1, *nb),
  7391. &c__1, &c_b5, &a_ref (*k + i__, i__), &c__1);
  7392. /* b1 := b1 - V1*w */
  7393. i__2 = i__ - 1;
  7394. NUMblas_dtrmv ("Lower", "No transpose", "Unit", &i__2, &a_ref (*k + 1, 1), lda, &t_ref (1, *nb),
  7395. &c__1);
  7396. i__2 = i__ - 1;
  7397. NUMblas_daxpy (&i__2, &c_b4, &t_ref (1, *nb), &c__1, &a_ref (*k + 1, i__), &c__1);
  7398. a_ref (*k + i__ - 1, i__ - 1) = ei;
  7399. }
  7400. /* Generate the elementary reflector H(i) to annihilate A(k+i+1:n,i)
  7401. Computing MIN */
  7402. i__2 = *k + i__ + 1;
  7403. i__3 = *n - *k - i__ + 1;
  7404. NUMlapack_dlarfg (&i__3, &a_ref (*k + i__, i__), &a_ref (MIN (i__2, *n), i__), &c__1, &tau[i__]);
  7405. ei = a_ref (*k + i__, i__);
  7406. a_ref (*k + i__, i__) = 1.;
  7407. /* Compute Y(1:n,i) */
  7408. i__2 = *n - *k - i__ + 1;
  7409. NUMblas_dgemv ("No transpose", n, &i__2, &c_b5, &a_ref (1, i__ + 1), lda, &a_ref (*k + i__, i__),
  7410. &c__1, &c_b38, &y_ref (1, i__), &c__1);
  7411. i__2 = *n - *k - i__ + 1;
  7412. i__3 = i__ - 1;
  7413. NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b5, &a_ref (*k + i__, 1), lda, &a_ref (*k + i__, i__),
  7414. &c__1, &c_b38, &t_ref (1, i__), &c__1);
  7415. i__2 = i__ - 1;
  7416. NUMblas_dgemv ("No transpose", n, &i__2, &c_b4, &y[y_offset], ldy, &t_ref (1, i__), &c__1, &c_b5,
  7417. &y_ref (1, i__), &c__1);
  7418. NUMblas_dscal (n, &tau[i__], &y_ref (1, i__), &c__1);
  7419. /* Compute T(1:i,i) */
  7420. i__2 = i__ - 1;
  7421. d__1 = -tau[i__];
  7422. NUMblas_dscal (&i__2, &d__1, &t_ref (1, i__), &c__1);
  7423. i__2 = i__ - 1;
  7424. NUMblas_dtrmv ("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, &t_ref (1, i__),
  7425. &c__1);
  7426. t_ref (i__, i__) = tau[i__];
  7427. /* L10: */
  7428. }
  7429. a_ref (*k + *nb, *nb) = ei;
  7430. return 0;
  7431. } /* NUMlapack_dlahrd */
  7432. #undef y_ref
  7433. #undef t_ref
  7434. int NUMlapack_dlaln2 (int *ltrans, integer *na, integer *nw, double *smin, double *ca, double *a, integer *lda,
  7435. double *d1, double *d2, double *b, integer *ldb, double *wr, double *wi, double *x, integer *ldx, double *scale,
  7436. double *xnorm, integer *info) {
  7437. /* Initialized data */
  7438. static int zswap[4] = { FALSE, FALSE, TRUE, TRUE };
  7439. static int rswap[4] = { FALSE, TRUE, FALSE, TRUE };
  7440. static integer ipivot[16] /* was [4][4] */ = { 1, 2, 3, 4, 2, 1, 4, 3, 3, 4, 1, 2,
  7441. 4, 3, 2, 1
  7442. };
  7443. /* System generated locals */
  7444. integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
  7445. double d__1, d__2, d__3, d__4, d__5, d__6;
  7446. static double equiv_0[4], equiv_1[4];
  7447. /* Local variables */
  7448. static double bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s;
  7449. static integer j;
  7450. static double u22abs;
  7451. static integer icmax;
  7452. static double bnorm, cnorm, smini;
  7453. #define ci (equiv_0)
  7454. #define cr (equiv_1)
  7455. static double bignum, bi1, bi2, br1, br2, smlnum, xi1, xi2, xr1, xr2, ci21, ci22, cr21, cr22, li21, csi,
  7456. ui11, lr21, ui12, ui22;
  7457. #define civ (equiv_0)
  7458. static double csr, ur11, ur12, ur22;
  7459. #define crv (equiv_1)
  7460. #define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
  7461. #define x_ref(a_1,a_2) x[(a_2)*x_dim1 + a_1]
  7462. #define ci_ref(a_1,a_2) ci[(a_2)*2 + a_1 - 3]
  7463. #define cr_ref(a_1,a_2) cr[(a_2)*2 + a_1 - 3]
  7464. #define ipivot_ref(a_1,a_2) ipivot[(a_2)*4 + a_1 - 5]
  7465. a_dim1 = *lda;
  7466. a_offset = 1 + a_dim1 * 1;
  7467. a -= a_offset;
  7468. b_dim1 = *ldb;
  7469. b_offset = 1 + b_dim1 * 1;
  7470. b -= b_offset;
  7471. x_dim1 = *ldx;
  7472. x_offset = 1 + x_dim1 * 1;
  7473. x -= x_offset;
  7474. /* Function Body
  7475. Compute BIGNUM */
  7476. smlnum = 2. * NUMblas_dlamch ("Safe minimum");
  7477. bignum = 1. / smlnum;
  7478. smini = MAX (*smin, smlnum);
  7479. /* Don't check for input errors */
  7480. *info = 0;
  7481. /* Standard Initializations */
  7482. *scale = 1.;
  7483. if (*na == 1) {
  7484. /* 1 x 1 (i.e., scalar) system C X = B */
  7485. if (*nw == 1) {
  7486. /* Real 1x1 system.
  7487. C = ca A - w D */
  7488. csr = *ca * a_ref (1, 1) - *wr * *d1;
  7489. cnorm = fabs (csr);
  7490. /* If | C | < SMINI, use C = SMINI */
  7491. if (cnorm < smini) {
  7492. csr = smini;
  7493. cnorm = smini;
  7494. *info = 1;
  7495. }
  7496. /* Check scaling for X = B / C */
  7497. bnorm = (d__1 = b_ref (1, 1), fabs (d__1));
  7498. if (cnorm < 1. && bnorm > 1.) {
  7499. if (bnorm > bignum * cnorm) {
  7500. *scale = 1. / bnorm;
  7501. }
  7502. }
  7503. /* Compute X */
  7504. x_ref (1, 1) = b_ref (1, 1) * *scale / csr;
  7505. *xnorm = (d__1 = x_ref (1, 1), fabs (d__1));
  7506. } else {
  7507. /* Complex 1x1 system (w is complex)
  7508. C = ca A - w D */
  7509. csr = *ca * a_ref (1, 1) - *wr * *d1;
  7510. csi = - (*wi) * *d1;
  7511. cnorm = fabs (csr) + fabs (csi);
  7512. /* If | C | < SMINI, use C = SMINI */
  7513. if (cnorm < smini) {
  7514. csr = smini;
  7515. csi = 0.;
  7516. cnorm = smini;
  7517. *info = 1;
  7518. }
  7519. /* Check scaling for X = B / C */
  7520. bnorm = (d__1 = b_ref (1, 1), fabs (d__1)) + (d__2 = b_ref (1, 2), fabs (d__2));
  7521. if (cnorm < 1. && bnorm > 1.) {
  7522. if (bnorm > bignum * cnorm) {
  7523. *scale = 1. / bnorm;
  7524. }
  7525. }
  7526. /* Compute X */
  7527. d__1 = *scale * b_ref (1, 1);
  7528. d__2 = *scale * b_ref (1, 2);
  7529. NUMlapack_dladiv (&d__1, &d__2, &csr, &csi, &x_ref (1, 1), &x_ref (1, 2));
  7530. *xnorm = (d__1 = x_ref (1, 1), fabs (d__1)) + (d__2 = x_ref (1, 2), fabs (d__2));
  7531. }
  7532. } else {
  7533. /* 2x2 System
  7534. Compute the real part of C = ca A - w D (or ca A' - w D ) */
  7535. cr_ref (1, 1) = *ca * a_ref (1, 1) - *wr * *d1;
  7536. cr_ref (2, 2) = *ca * a_ref (2, 2) - *wr * *d2;
  7537. if (*ltrans) {
  7538. cr_ref (1, 2) = *ca * a_ref (2, 1);
  7539. cr_ref (2, 1) = *ca * a_ref (1, 2);
  7540. } else {
  7541. cr_ref (2, 1) = *ca * a_ref (2, 1);
  7542. cr_ref (1, 2) = *ca * a_ref (1, 2);
  7543. }
  7544. if (*nw == 1) {
  7545. /* Real 2x2 system (w is real)
  7546. Find the largest element in C */
  7547. cmax = 0.;
  7548. icmax = 0;
  7549. for (j = 1; j <= 4; ++j) {
  7550. if ( (d__1 = crv[j - 1], fabs (d__1)) > cmax) {
  7551. cmax = (d__1 = crv[j - 1], fabs (d__1));
  7552. icmax = j;
  7553. }
  7554. /* L10: */
  7555. }
  7556. /* If norm(C) < SMINI, use SMINI*identity. */
  7557. if (cmax < smini) {
  7558. /* Computing MAX */
  7559. d__3 = (d__1 = b_ref (1, 1), fabs (d__1)), d__4 = (d__2 = b_ref (2, 1), fabs (d__2));
  7560. bnorm = MAX (d__3, d__4);
  7561. if (smini < 1. && bnorm > 1.) {
  7562. if (bnorm > bignum * smini) {
  7563. *scale = 1. / bnorm;
  7564. }
  7565. }
  7566. temp = *scale / smini;
  7567. x_ref (1, 1) = temp * b_ref (1, 1);
  7568. x_ref (2, 1) = temp * b_ref (2, 1);
  7569. *xnorm = temp * bnorm;
  7570. *info = 1;
  7571. return 0;
  7572. }
  7573. /* Gaussian elimination with complete pivoting. */
  7574. ur11 = crv[icmax - 1];
  7575. cr21 = crv[ipivot_ref (2, icmax) - 1];
  7576. ur12 = crv[ipivot_ref (3, icmax) - 1];
  7577. cr22 = crv[ipivot_ref (4, icmax) - 1];
  7578. ur11r = 1. / ur11;
  7579. lr21 = ur11r * cr21;
  7580. ur22 = cr22 - ur12 * lr21;
  7581. /* If smaller pivot < SMINI, use SMINI */
  7582. if (fabs (ur22) < smini) {
  7583. ur22 = smini;
  7584. *info = 1;
  7585. }
  7586. if (rswap[icmax - 1]) {
  7587. br1 = b_ref (2, 1);
  7588. br2 = b_ref (1, 1);
  7589. } else {
  7590. br1 = b_ref (1, 1);
  7591. br2 = b_ref (2, 1);
  7592. }
  7593. br2 -= lr21 * br1;
  7594. /* Computing MAX */
  7595. d__2 = (d__1 = br1 * (ur22 * ur11r), fabs (d__1)), d__3 = fabs (br2);
  7596. bbnd = MAX (d__2, d__3);
  7597. if (bbnd > 1. && fabs (ur22) < 1.) {
  7598. if (bbnd >= bignum * fabs (ur22)) {
  7599. *scale = 1. / bbnd;
  7600. }
  7601. }
  7602. xr2 = br2 * *scale / ur22;
  7603. xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
  7604. if (zswap[icmax - 1]) {
  7605. x_ref (1, 1) = xr2;
  7606. x_ref (2, 1) = xr1;
  7607. } else {
  7608. x_ref (1, 1) = xr1;
  7609. x_ref (2, 1) = xr2;
  7610. }
  7611. /* Computing MAX */
  7612. d__1 = fabs (xr1), d__2 = fabs (xr2);
  7613. *xnorm = MAX (d__1, d__2);
  7614. /* Further scaling if norm(A) norm(X) > overflow */
  7615. if (*xnorm > 1. && cmax > 1.) {
  7616. if (*xnorm > bignum / cmax) {
  7617. temp = cmax / bignum;
  7618. x_ref (1, 1) = temp * x_ref (1, 1);
  7619. x_ref (2, 1) = temp * x_ref (2, 1);
  7620. *xnorm = temp * *xnorm;
  7621. *scale = temp * *scale;
  7622. }
  7623. }
  7624. } else {
  7625. /* Complex 2x2 system (w is complex)
  7626. Find the largest element in C */
  7627. ci_ref (1, 1) = - (*wi) * *d1;
  7628. ci_ref (2, 1) = 0.;
  7629. ci_ref (1, 2) = 0.;
  7630. ci_ref (2, 2) = - (*wi) * *d2;
  7631. cmax = 0.;
  7632. icmax = 0;
  7633. for (j = 1; j <= 4; ++j) {
  7634. if ( (d__1 = crv[j - 1], fabs (d__1)) + (d__2 = civ[j - 1], fabs (d__2)) > cmax) {
  7635. cmax = (d__1 = crv[j - 1], fabs (d__1)) + (d__2 = civ[j - 1], fabs (d__2));
  7636. icmax = j;
  7637. }
  7638. /* L20: */
  7639. }
  7640. /* If norm(C) < SMINI, use SMINI*identity. */
  7641. if (cmax < smini) {
  7642. /* Computing MAX */
  7643. d__5 = (d__1 = b_ref (1, 1), fabs (d__1)) + (d__2 = b_ref (1, 2), fabs (d__2)), d__6 = (d__3 =
  7644. b_ref (2, 1), fabs (d__3)) + (d__4 = b_ref (2, 2), fabs (d__4));
  7645. bnorm = MAX (d__5, d__6);
  7646. if (smini < 1. && bnorm > 1.) {
  7647. if (bnorm > bignum * smini) {
  7648. *scale = 1. / bnorm;
  7649. }
  7650. }
  7651. temp = *scale / smini;
  7652. x_ref (1, 1) = temp * b_ref (1, 1);
  7653. x_ref (2, 1) = temp * b_ref (2, 1);
  7654. x_ref (1, 2) = temp * b_ref (1, 2);
  7655. x_ref (2, 2) = temp * b_ref (2, 2);
  7656. *xnorm = temp * bnorm;
  7657. *info = 1;
  7658. return 0;
  7659. }
  7660. /* Gaussian elimination with complete pivoting. */
  7661. ur11 = crv[icmax - 1];
  7662. ui11 = civ[icmax - 1];
  7663. cr21 = crv[ipivot_ref (2, icmax) - 1];
  7664. ci21 = civ[ipivot_ref (2, icmax) - 1];
  7665. ur12 = crv[ipivot_ref (3, icmax) - 1];
  7666. ui12 = civ[ipivot_ref (3, icmax) - 1];
  7667. cr22 = crv[ipivot_ref (4, icmax) - 1];
  7668. ci22 = civ[ipivot_ref (4, icmax) - 1];
  7669. if (icmax == 1 || icmax == 4) {
  7670. /* Code when off-diagonals of pivoted C are real */
  7671. if (fabs (ur11) > fabs (ui11)) {
  7672. temp = ui11 / ur11;
  7673. /* Computing 2nd power */
  7674. d__1 = temp;
  7675. ur11r = 1. / (ur11 * (d__1 * d__1 + 1.));
  7676. ui11r = -temp * ur11r;
  7677. } else {
  7678. temp = ur11 / ui11;
  7679. /* Computing 2nd power */
  7680. d__1 = temp;
  7681. ui11r = -1. / (ui11 * (d__1 * d__1 + 1.));
  7682. ur11r = -temp * ui11r;
  7683. }
  7684. lr21 = cr21 * ur11r;
  7685. li21 = cr21 * ui11r;
  7686. ur12s = ur12 * ur11r;
  7687. ui12s = ur12 * ui11r;
  7688. ur22 = cr22 - ur12 * lr21;
  7689. ui22 = ci22 - ur12 * li21;
  7690. } else {
  7691. /* Code when diagonals of pivoted C are real */
  7692. ur11r = 1. / ur11;
  7693. ui11r = 0.;
  7694. lr21 = cr21 * ur11r;
  7695. li21 = ci21 * ur11r;
  7696. ur12s = ur12 * ur11r;
  7697. ui12s = ui12 * ur11r;
  7698. ur22 = cr22 - ur12 * lr21 + ui12 * li21;
  7699. ui22 = -ur12 * li21 - ui12 * lr21;
  7700. }
  7701. u22abs = fabs (ur22) + fabs (ui22);
  7702. /* If smaller pivot < SMINI, use SMINI */
  7703. if (u22abs < smini) {
  7704. ur22 = smini;
  7705. ui22 = 0.;
  7706. *info = 1;
  7707. }
  7708. if (rswap[icmax - 1]) {
  7709. br2 = b_ref (1, 1);
  7710. br1 = b_ref (2, 1);
  7711. bi2 = b_ref (1, 2);
  7712. bi1 = b_ref (2, 2);
  7713. } else {
  7714. br1 = b_ref (1, 1);
  7715. br2 = b_ref (2, 1);
  7716. bi1 = b_ref (1, 2);
  7717. bi2 = b_ref (2, 2);
  7718. }
  7719. br2 = br2 - lr21 * br1 + li21 * bi1;
  7720. bi2 = bi2 - li21 * br1 - lr21 * bi1;
  7721. /* Computing MAX */
  7722. // djmw 20110721 changed abs(br2) to fabs(br2)
  7723. d__1 = (fabs (br1) + fabs (bi1)) * (u22abs * (fabs (ur11r) + fabs (ui11r))), d__2 =
  7724. fabs (br2) + fabs (bi2);
  7725. bbnd = MAX (d__1, d__2);
  7726. if (bbnd > 1. && u22abs < 1.) {
  7727. if (bbnd >= bignum * u22abs) {
  7728. *scale = 1. / bbnd;
  7729. br1 = *scale * br1;
  7730. bi1 = *scale * bi1;
  7731. br2 = *scale * br2;
  7732. bi2 = *scale * bi2;
  7733. }
  7734. }
  7735. NUMlapack_dladiv (&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
  7736. xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
  7737. xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
  7738. if (zswap[icmax - 1]) {
  7739. x_ref (1, 1) = xr2;
  7740. x_ref (2, 1) = xr1;
  7741. x_ref (1, 2) = xi2;
  7742. x_ref (2, 2) = xi1;
  7743. } else {
  7744. x_ref (1, 1) = xr1;
  7745. x_ref (2, 1) = xr2;
  7746. x_ref (1, 2) = xi1;
  7747. x_ref (2, 2) = xi2;
  7748. }
  7749. /* Computing MAX */
  7750. d__1 = fabs (xr1) + fabs (xi1), d__2 = fabs (xr2) + fabs (xi2);
  7751. *xnorm = MAX (d__1, d__2);
  7752. /* Further scaling if norm(A) norm(X) > overflow */
  7753. if (*xnorm > 1. && cmax > 1.) {
  7754. if (*xnorm > bignum / cmax) {
  7755. temp = cmax / bignum;
  7756. x_ref (1, 1) = temp * x_ref (1, 1);
  7757. x_ref (2, 1) = temp * x_ref (2, 1);
  7758. x_ref (1, 2) = temp * x_ref (1, 2);
  7759. x_ref (2, 2) = temp * x_ref (2, 2);
  7760. *xnorm = temp * *xnorm;
  7761. *scale = temp * *scale;
  7762. }
  7763. }
  7764. }
  7765. }
  7766. return 0;
} /* NUMlapack_dlaln2 */
  7768. #undef ipivot_ref
  7769. #undef cr_ref
  7770. #undef ci_ref
  7771. #undef x_ref
  7772. #undef b_ref
  7773. #undef crv
  7774. #undef civ
  7775. #undef cr
  7776. #undef ci
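/* Illustrative sketch (not part of the original translation): solve the
   perturbed 1-by-1 real system (ca*A - wr*D) x = b with scaling, here
   (1*2 - 0.5*1) x = 3, so x = 2. */
#if 0
static void example_dlaln2 (void) {
	int ltrans = FALSE;
	integer na = 1, nw = 1, lda = 1, ldb = 1, ldx = 1, info;
	double smin = 1e-300, ca = 1., a = 2., d1 = 1., d2 = 0.;
	double b[1] = { 3. }, wr = .5, wi = 0., x[1], scale, xnorm;
	NUMlapack_dlaln2 (&ltrans, &na, &nw, &smin, &ca, &a, &lda, &d1, &d2,
		b, &ldb, &wr, &wi, x, &ldx, &scale, &xnorm, &info);
	/* scale == 1., x[0] == 2., xnorm == 2., info == 0 */
}
#endif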
  7777. double NUMlapack_dlange (const char *norm, integer *m, integer *n, double *a, integer *lda, double *work) {
  7778. /* Table of constant values */
  7779. static integer c__1 = 1;
  7780. /* System generated locals */
  7781. integer a_dim1, a_offset, i__1, i__2;
  7782. double ret_val, d__1, d__2, d__3;
  7783. /* Local variables */
  7784. static integer i__, j;
  7785. static double scale;
  7786. static double value;
  7787. static double sum;
  7788. a_dim1 = *lda;
  7789. a_offset = 1 + a_dim1 * 1;
  7790. a -= a_offset;
  7791. --work;
  7792. /* Function Body */
  7793. if (MIN (*m, *n) == 0) {
  7794. value = 0.;
  7795. } else if (lsame_ (norm, "M")) {
  7796. /* Find MAX(abs(A(i,j))). */
  7797. value = 0.;
  7798. i__1 = *n;
  7799. for (j = 1; j <= i__1; ++j) {
  7800. i__2 = *m;
  7801. for (i__ = 1; i__ <= i__2; ++i__) {
  7802. /* Computing MAX */
  7803. d__2 = value, d__3 = (d__1 = a_ref (i__, j), fabs (d__1));
  7804. value = MAX (d__2, d__3);
  7805. /* L10: */
  7806. }
  7807. /* L20: */
  7808. }
  7809. } else if (lsame_ (norm, "O") || * (unsigned char *) norm == '1') {
  7810. /* Find norm1(A). */
  7811. value = 0.;
  7812. i__1 = *n;
  7813. for (j = 1; j <= i__1; ++j) {
  7814. sum = 0.;
  7815. i__2 = *m;
  7816. for (i__ = 1; i__ <= i__2; ++i__) {
  7817. sum += (d__1 = a_ref (i__, j), fabs (d__1));
  7818. /* L30: */
  7819. }
  7820. value = MAX (value, sum);
  7821. /* L40: */
  7822. }
  7823. } else if (lsame_ (norm, "I")) {
  7824. /* Find normI(A). */
  7825. i__1 = *m;
  7826. for (i__ = 1; i__ <= i__1; ++i__) {
  7827. work[i__] = 0.;
  7828. /* L50: */
  7829. }
  7830. i__1 = *n;
  7831. for (j = 1; j <= i__1; ++j) {
  7832. i__2 = *m;
  7833. for (i__ = 1; i__ <= i__2; ++i__) {
  7834. work[i__] += (d__1 = a_ref (i__, j), fabs (d__1));
  7835. /* L60: */
  7836. }
  7837. /* L70: */
  7838. }
  7839. value = 0.;
  7840. i__1 = *m;
  7841. for (i__ = 1; i__ <= i__1; ++i__) {
  7842. /* Computing MAX */
  7843. d__1 = value, d__2 = work[i__];
  7844. value = MAX (d__1, d__2);
  7845. /* L80: */
  7846. }
  7847. } else if (lsame_ (norm, "F") || lsame_ (norm, "E")) {
  7848. /* Find normF(A). */
  7849. scale = 0.;
  7850. sum = 1.;
  7851. i__1 = *n;
  7852. for (j = 1; j <= i__1; ++j) {
  7853. NUMlapack_dlassq (m, &a_ref (1, j), &c__1, &scale, &sum);
  7854. /* L90: */
  7855. }
  7856. value = scale * sqrt (sum);
  7857. }
  7858. ret_val = value;
  7859. return ret_val;
  7860. } /* NUMlapack_dlange */
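/* Illustrative sketch (not part of the original translation): the four norms
   of the column-major 2-by-2 matrix [ 1 3; 2 4 ]. */
#if 0
static void example_dlange (void) {
	double a[4] = { 1., 2., 3., 4. }, work[2];   /* work[] is only read for norm "I" */
	integer m = 2, n = 2, lda = 2;
	double norm1 = NUMlapack_dlange ("1", &m, &n, a, &lda, work);   /* max column sum: 7 */
	double normI = NUMlapack_dlange ("I", &m, &n, a, &lda, work);   /* max row sum: 6 */
	double normM = NUMlapack_dlange ("M", &m, &n, a, &lda, work);   /* largest |a(i,j)|: 4 */
	double normF = NUMlapack_dlange ("F", &m, &n, a, &lda, work);   /* sqrt(1+4+9+16) = sqrt(30) */
}
#endif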
  7861. double NUMlapack_dlanhs (const char *norm, integer *n, double *a, integer *lda, double *work) {
  7862. /* Table of constant values */
  7863. static integer c__1 = 1;
  7864. /* System generated locals */
  7865. integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
  7866. double ret_val, d__1, d__2, d__3;
  7867. /* Local variables */
  7868. static integer i__, j;
  7869. static double scale;
  7870. static double value;
  7871. static double sum;
  7872. a_dim1 = *lda;
  7873. a_offset = 1 + a_dim1 * 1;
  7874. a -= a_offset;
  7875. --work;
  7876. /* Function Body */
  7877. if (*n == 0) {
  7878. value = 0.;
  7879. } else if (lsame_ (norm, "M")) {
  7880. /* Find MAX (fabs (A(i,j))). */
  7881. value = 0.;
  7882. i__1 = *n;
  7883. for (j = 1; j <= i__1; ++j) {
  7884. /* Computing MIN */
  7885. i__3 = *n, i__4 = j + 1;
  7886. i__2 = MIN (i__3, i__4);
  7887. for (i__ = 1; i__ <= i__2; ++i__) {
  7888. /* Computing MAX */
  7889. d__2 = value, d__3 = (d__1 = a_ref (i__, j), fabs (d__1));
  7890. value = MAX (d__2, d__3);
  7891. /* L10: */
  7892. }
  7893. /* L20: */
  7894. }
  7895. } else if (lsame_ (norm, "O") || * (unsigned char *) norm == '1') {
  7896. /* Find norm1(A). */
  7897. value = 0.;
  7898. i__1 = *n;
  7899. for (j = 1; j <= i__1; ++j) {
  7900. sum = 0.;
  7901. /* Computing MIN */
  7902. i__3 = *n, i__4 = j + 1;
  7903. i__2 = MIN (i__3, i__4);
  7904. for (i__ = 1; i__ <= i__2; ++i__) {
  7905. sum += (d__1 = a_ref (i__, j), fabs (d__1));
  7906. /* L30: */
  7907. }
  7908. value = MAX (value, sum);
  7909. /* L40: */
  7910. }
  7911. } else if (lsame_ (norm, "I")) {
  7912. /* Find normI(A). */
  7913. i__1 = *n;
  7914. for (i__ = 1; i__ <= i__1; ++i__) {
  7915. work[i__] = 0.;
  7916. /* L50: */
  7917. }
  7918. i__1 = *n;
  7919. for (j = 1; j <= i__1; ++j) {
  7920. /* Computing MIN */
  7921. i__3 = *n, i__4 = j + 1;
  7922. i__2 = MIN (i__3, i__4);
  7923. for (i__ = 1; i__ <= i__2; ++i__) {
  7924. work[i__] += (d__1 = a_ref (i__, j), fabs (d__1));
  7925. /* L60: */
  7926. }
  7927. /* L70: */
  7928. }
  7929. value = 0.;
  7930. i__1 = *n;
  7931. for (i__ = 1; i__ <= i__1; ++i__) {
  7932. /* Computing MAX */
  7933. d__1 = value, d__2 = work[i__];
  7934. value = MAX (d__1, d__2);
  7935. /* L80: */
  7936. }
  7937. } else if (lsame_ (norm, "F") || lsame_ (norm, "E")) {
  7938. /* Find normF(A). */
  7939. scale = 0.;
  7940. sum = 1.;
  7941. i__1 = *n;
  7942. for (j = 1; j <= i__1; ++j) {
  7943. /* Computing MIN */
  7944. i__3 = *n, i__4 = j + 1;
  7945. i__2 = MIN (i__3, i__4);
  7946. NUMlapack_dlassq (&i__2, &a_ref (1, j), &c__1, &scale, &sum);
  7947. /* L90: */
  7948. }
  7949. value = scale * sqrt (sum);
  7950. }
  7951. ret_val = value;
  7952. return ret_val;
  7953. } /* NUMlapack_dlanhs */
double NUMlapack_dlanst (const char *norm, integer *n, double *d__, double *e) {
	/* Table of constant values */
	static integer c__1 = 1;
	/* System generated locals */
	integer i__1;
	double ret_val, d__1, d__2, d__3, d__4, d__5;
	static integer i__;
	static double scale;
	static double anorm;
	static double sum;
	--e;
	--d__;
	/* Function Body */
	if (*n <= 0) {
		anorm = 0.;
	} else if (lsame_ (norm, "M")) {
		/* Find max(abs(A(i,j))). */
		anorm = (d__1 = d__[*n], fabs (d__1));
		i__1 = *n - 1;
		for (i__ = 1; i__ <= i__1; ++i__) {
			/* Computing MAX */
			d__2 = anorm, d__3 = (d__1 = d__[i__], fabs (d__1));
			anorm = MAX (d__2, d__3);
			/* Computing MAX */
			d__2 = anorm, d__3 = (d__1 = e[i__], fabs (d__1));
			anorm = MAX (d__2, d__3);
			/* L10: */
		}
	} else if (lsame_ (norm, "O") || * (unsigned char *) norm == '1' || lsame_ (norm, "I")) {
		/* Find norm1(A). */
		if (*n == 1) {
			anorm = fabs (d__[1]);
		} else {
			/* Computing MAX */
			d__3 = fabs (d__[1]) + fabs (e[1]), d__4 = (d__1 = e[*n - 1], fabs (d__1)) + (d__2 = d__[*n], fabs (d__2));
			anorm = MAX (d__3, d__4);
			i__1 = *n - 1;
			for (i__ = 2; i__ <= i__1; ++i__) {
				/* Computing MAX */
				d__4 = anorm, d__5 = (d__1 = d__[i__], fabs (d__1)) + (d__2 = e[i__], fabs (d__2)) + (d__3 = e[i__ - 1], fabs (d__3));
				anorm = MAX (d__4, d__5);
				/* L20: */
			}
		}
	} else if (lsame_ (norm, "F") || lsame_ (norm, "E")) {
		/* Find normF(A). */
		scale = 0.;
		sum = 1.;
		if (*n > 1) {
			i__1 = *n - 1;
			NUMlapack_dlassq (&i__1, &e[1], &c__1, &scale, &sum);
			sum *= 2;
		}
		NUMlapack_dlassq (n, &d__[1], &c__1, &scale, &sum);
		anorm = scale * sqrt (sum);
	}
	ret_val = anorm;
	return ret_val;
} /* NUMlapack_dlanst */
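/*
	Illustration only (added sketch): for the symmetric tridiagonal matrix with
	diagonal d and off-diagonal e, the Frobenius norm counts e twice (hence the
	"sum *= 2" above), because each off-diagonal entry appears on both sides of
	the diagonal.

		integer n = 3;
		double d [3] = { 1, 1, 1 }, e [2] = { 2, 2 };
		double nrm = NUMlapack_dlanst ("F", &n, d, e);
		// nrm == sqrt (1 + 1 + 1 + 2 * (4 + 4)) == sqrt (19)
*/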
double NUMlapack_dlansy (const char *norm, const char *uplo, integer *n, double *a, integer *lda, double *work) {
	/* Table of constant values */
	static integer c__1 = 1;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2;
	double ret_val, d__1, d__2, d__3;
	/* Local variables */
	static double absa;
	static integer i__, j;
	static double scale;
	static double value;
	static double sum;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--work;
	/* Function Body */
	if (*n == 0) {
		value = 0.;
	} else if (lsame_ (norm, "M")) {
		/* Find max(abs(A(i,j))). */
		value = 0.;
		if (lsame_ (uplo, "U")) {
			i__1 = *n;
			for (j = 1; j <= i__1; ++j) {
				i__2 = j;
				for (i__ = 1; i__ <= i__2; ++i__) {
					/* Computing MAX */
					d__2 = value, d__3 = (d__1 = a_ref (i__, j), fabs (d__1));
					value = MAX (d__2, d__3);
					/* L10: */
				}
				/* L20: */
			}
		} else {
			i__1 = *n;
			for (j = 1; j <= i__1; ++j) {
				i__2 = *n;
				for (i__ = j; i__ <= i__2; ++i__) {
					/* Computing MAX */
					d__2 = value, d__3 = (d__1 = a_ref (i__, j), fabs (d__1));
					value = MAX (d__2, d__3);
					/* L30: */
				}
				/* L40: */
			}
		}
	} else if (lsame_ (norm, "I") || lsame_ (norm, "O") || * (unsigned char *) norm == '1') {
		/* Find normI(A) ( = norm1(A), since A is symmetric). */
		value = 0.;
		if (lsame_ (uplo, "U")) {
			i__1 = *n;
			for (j = 1; j <= i__1; ++j) {
				sum = 0.;
				i__2 = j - 1;
				for (i__ = 1; i__ <= i__2; ++i__) {
					absa = (d__1 = a_ref (i__, j), fabs (d__1));
					sum += absa;
					work[i__] += absa;
					/* L50: */
				}
				work[j] = sum + (d__1 = a_ref (j, j), fabs (d__1));
				/* L60: */
			}
			i__1 = *n;
			for (i__ = 1; i__ <= i__1; ++i__) {
				/* Computing MAX */
				d__1 = value, d__2 = work[i__];
				value = MAX (d__1, d__2);
				/* L70: */
			}
		} else {
			i__1 = *n;
			for (i__ = 1; i__ <= i__1; ++i__) {
				work[i__] = 0.;
				/* L80: */
			}
			i__1 = *n;
			for (j = 1; j <= i__1; ++j) {
				sum = work[j] + (d__1 = a_ref (j, j), fabs (d__1));
				i__2 = *n;
				for (i__ = j + 1; i__ <= i__2; ++i__) {
					absa = (d__1 = a_ref (i__, j), fabs (d__1));
					sum += absa;
					work[i__] += absa;
					/* L90: */
				}
				value = MAX (value, sum);
				/* L100: */
			}
		}
	} else if (lsame_ (norm, "F") || lsame_ (norm, "E")) {
		/* Find normF(A). */
		scale = 0.;
		sum = 1.;
		if (lsame_ (uplo, "U")) {
			i__1 = *n;
			for (j = 2; j <= i__1; ++j) {
				i__2 = j - 1;
				NUMlapack_dlassq (&i__2, &a_ref (1, j), &c__1, &scale, &sum);
				/* L110: */
			}
		} else {
			i__1 = *n - 1;
			for (j = 1; j <= i__1; ++j) {
				i__2 = *n - j;
				NUMlapack_dlassq (&i__2, &a_ref (j + 1, j), &c__1, &scale, &sum);
				/* L120: */
			}
		}
		sum *= 2;
		i__1 = *lda + 1;
		NUMlapack_dlassq (n, &a[a_offset], &i__1, &scale, &sum);
		value = scale * sqrt (sum);
	}
	ret_val = value;
	return ret_val;
} /* NUMlapack_dlansy */
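/*
	Illustration only (added sketch): only the triangle selected by uplo is
	referenced, so the opposite triangle may hold garbage. Norms "I", "O" and
	"1" need a work array of length n; "M", "F" and "E" do not reference it.

		integer n = 2, lda = 2;
		double a [4] = { 1, -99, 2, 3 };   // upper triangle of [ 1 2 ; 2 3 ]
		double work [2];
		double inf = NUMlapack_dlansy ("I", "U", &n, a, &lda, work);
		// inf == MAX (1 + 2, 2 + 3) == 5   (the -99 is never read)
*/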
int NUMlapack_dlanv2 (double *a, double *b, double *c__, double *d__, double *rt1r, double *rt1i,
	double *rt2r, double *rt2i, double *cs, double *sn) {
	/* Table of constant values */
	static double c_b4 = 1.;
	/* System generated locals */
	double d__1, d__2;
	/* Local variables */
	static double temp, p, scale, bcmax, z__, bcmis, sigma;
	static double aa, bb, cc, dd;
	static double cs1, sn1, sab, sac, eps, tau;
	eps = NUMblas_dlamch ("P");
	if (*c__ == 0.) {
		*cs = 1.;
		*sn = 0.;
		goto L10;
	} else if (*b == 0.) {
		/* Swap rows and columns */
		*cs = 0.;
		*sn = 1.;
		temp = *d__;
		*d__ = *a;
		*a = temp;
		*b = - (*c__);
		*c__ = 0.;
		goto L10;
	} else if (*a - *d__ == 0. && d_sign (&c_b4, b) != d_sign (&c_b4, c__)) {
		*cs = 1.;
		*sn = 0.;
		goto L10;
	} else {
		temp = *a - *d__;
		p = temp * .5;
		/* Computing MAX */
		d__1 = fabs (*b), d__2 = fabs (*c__);
		bcmax = MAX (d__1, d__2);
		/* Computing MIN */
		d__1 = fabs (*b), d__2 = fabs (*c__);
		bcmis = MIN (d__1, d__2) * d_sign (&c_b4, b) * d_sign (&c_b4, c__);
		/* Computing MAX */
		d__1 = fabs (p);
		scale = MAX (d__1, bcmax);
		z__ = p / scale * p + bcmax / scale * bcmis;
		/* If Z is of the order of the machine accuracy, postpone the
		   decision on the nature of eigenvalues */
		if (z__ >= eps * 4.) {
			/* Real eigenvalues. Compute A and D. */
			d__1 = sqrt (scale) * sqrt (z__);
			z__ = p + d_sign (&d__1, &p);
			*a = *d__ + z__;
			*d__ -= bcmax / z__ * bcmis;
			/* Compute B and the rotation matrix */
			tau = NUMlapack_dlapy2 (c__, &z__);
			*cs = z__ / tau;
			*sn = *c__ / tau;
			*b -= *c__;
			*c__ = 0.;
		} else {
			/* Complex eigenvalues, or real (almost) equal eigenvalues.
			   Make diagonal elements equal. */
			sigma = *b + *c__;
			tau = NUMlapack_dlapy2 (&sigma, &temp);
			*cs = sqrt ( (fabs (sigma) / tau + 1.) * .5);
			*sn = - (p / (tau * *cs)) * d_sign (&c_b4, &sigma);
			/* Compute [ AA BB ] = [ A B ] [ CS -SN ]
			           [ CC DD ]   [ C D ] [ SN  CS ] */
			aa = *a * *cs + *b * *sn;
			bb = - (*a) * *sn + *b * *cs;
			cc = *c__ * *cs + *d__ * *sn;
			dd = - (*c__) * *sn + *d__ * *cs;
			/* Compute [ A B ] = [ CS  SN ] [ AA BB ]
			           [ C D ]   [-SN  CS ] [ CC DD ] */
			*a = aa * *cs + cc * *sn;
			*b = bb * *cs + dd * *sn;
			*c__ = -aa * *sn + cc * *cs;
			*d__ = -bb * *sn + dd * *cs;
			temp = (*a + *d__) * .5;
			*a = temp;
			*d__ = temp;
			if (*c__ != 0.) {
				if (*b != 0.) {
					if (d_sign (&c_b4, b) == d_sign (&c_b4, c__)) {
						/* Real eigenvalues: reduce to upper triangular form */
						sab = sqrt ( (fabs (*b)));
						sac = sqrt ( (fabs (*c__)));
						d__1 = sab * sac;
						p = d_sign (&d__1, c__);
						tau = 1. / sqrt ( (d__1 = *b + *c__, fabs (d__1)));
						*a = temp + p;
						*d__ = temp - p;
						*b -= *c__;
						*c__ = 0.;
						cs1 = sab * tau;
						sn1 = sac * tau;
						temp = *cs * cs1 - *sn * sn1;
						*sn = *cs * sn1 + *sn * cs1;
						*cs = temp;
					}
				} else {
					*b = - (*c__);
					*c__ = 0.;
					temp = *cs;
					*cs = - (*sn);
					*sn = temp;
				}
			}
		}
	}
L10:
	/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
	*rt1r = *a;
	*rt2r = *d__;
	if (*c__ == 0.) {
		*rt1i = 0.;
		*rt2i = 0.;
	} else {
		*rt1i = sqrt ( (fabs (*b))) * sqrt ( (fabs (*c__)));
		*rt2i = - (*rt1i);
	}
	return 0;
} /* NUMlapack_dlanv2 */
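/*
	Illustration only (added sketch): NUMlapack_dlanv2 brings a real 2-by-2
	matrix into standardized Schur form, overwriting a, b, c, d and returning
	the eigenvalues plus the rotation (cs, sn) that achieves it.

		double a = 1., b = 2., c = -3., d = 1.;
		double rt1r, rt1i, rt2r, rt2i, cs, sn;
		NUMlapack_dlanv2 (&a, &b, &c, &d, &rt1r, &rt1i, &rt2r, &rt2i, &cs, &sn);
		// b * c < 0, so the eigenvalues form a complex pair:
		// rt1r == rt2r == 1, rt1i == -rt2i == sqrt (6)
*/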
int NUMlapack_dlapll (integer *n, double *x, integer *incx, double *y, integer *incy, double *ssmin) {
	/* System generated locals */
	integer i__1;
	/* Local variables */
	static double c__;
	static double ssmax, a11, a12, a22;
	static double tau;
	--y;
	--x;
	/* Function Body */
	if (*n <= 1) {
		*ssmin = 0.;
		return 0;
	}
	/* Compute the QR factorization of the N-by-2 matrix ( X Y ) */
	NUMlapack_dlarfg (n, &x[1], &x[*incx + 1], incx, &tau);
	a11 = x[1];
	x[1] = 1.;
	c__ = -tau * NUMblas_ddot (n, &x[1], incx, &y[1], incy);
	NUMblas_daxpy (n, &c__, &x[1], incx, &y[1], incy);
	i__1 = *n - 1;
	NUMlapack_dlarfg (&i__1, &y[*incy + 1], &y[ (*incy << 1) + 1], incy, &tau);
	a12 = y[1];
	a22 = y[*incy + 1];
	/* Compute the SVD of the 2-by-2 upper triangular matrix. */
	NUMlapack_dlas2 (&a11, &a12, &a22, ssmin, &ssmax);
	return 0;
} /* NUMlapack_dlapll */
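/*
	Illustration only (added sketch): NUMlapack_dlapll measures the linear
	dependence of two n-vectors; ssmin is the smaller singular value of the
	n-by-2 matrix ( X Y ), so parallel vectors give ssmin == 0. Note that X
	and Y are overwritten.

		integer n = 3, inc = 1;
		double x [3] = { 1, 2, 3 }, y [3] = { 2, 4, 6 };   // y = 2 x
		double ssmin;
		NUMlapack_dlapll (&n, x, &inc, y, &inc, &ssmin);
		// ssmin == 0 up to rounding
*/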
#define x_ref(a_1,a_2) x[(a_2)*x_dim1 + a_1]
int NUMlapack_dlapmt (integer *forwrd, integer *m, integer *n, double *x, integer *ldx, integer *k) {
	/* System generated locals */
	integer x_dim1, x_offset, i__1, i__2;
	/* Local variables */
	static double temp;
	static integer i__, j, ii, in;
	x_dim1 = *ldx;
	x_offset = 1 + x_dim1 * 1;
	x -= x_offset;
	--k;
	/* Function Body */
	if (*n <= 1) {
		return 0;
	}
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
		k[i__] = -k[i__];
		/* L10: */
	}
	if (*forwrd) {
		/* Forward permutation */
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (k[i__] > 0) {
				goto L40;
			}
			j = i__;
			k[j] = -k[j];
			in = k[j];
L20:
			if (k[in] > 0) {
				goto L40;
			}
			i__2 = *m;
			for (ii = 1; ii <= i__2; ++ii) {
				temp = x_ref (ii, j);
				x_ref (ii, j) = x_ref (ii, in);
				x_ref (ii, in) = temp;
				/* L30: */
			}
			k[in] = -k[in];
			j = in;
			in = k[in];
			goto L20;
L40:
			/* L50: */
			;
		}
	} else {
		/* Backward permutation */
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (k[i__] > 0) {
				goto L80;
			}
			k[i__] = -k[i__];
			j = k[i__];
L60:
			if (j == i__) {
				goto L80;
			}
			i__2 = *m;
			for (ii = 1; ii <= i__2; ++ii) {
				temp = x_ref (ii, i__);
				x_ref (ii, i__) = x_ref (ii, j);
				x_ref (ii, j) = temp;
				/* L70: */
			}
			k[j] = -k[j];
			j = k[j];
			goto L60;
L80:
			/* L90: */
			;
		}
	}
	return 0;
} /* NUMlapack_dlapmt */
#undef x_ref
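/*
	Illustration only (added sketch): NUMlapack_dlapmt permutes the columns of
	an m-by-n matrix in place, walking the cycles of the permutation in k
	(entries are temporarily negated as a "visited" mark and restored on exit).
	With a simple transposition the forward and backward directions coincide:

		integer forwrd = 1, m = 2, n = 2, ldx = 2;
		double x [4] = { 1, 2, 3, 4 };   // columns (1,2) and (3,4)
		integer k [2] = { 2, 1 };
		NUMlapack_dlapmt (&forwrd, &m, &n, x, &ldx, k);
		// x is now { 3, 4, 1, 2 }: the two columns are swapped
*/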
double NUMlapack_dlapy2 (double *x, double *y) {
	/* System generated locals */
	double ret_val, d__1;
	/* Local variables */
	static double xabs, yabs, w, z__;
	xabs = fabs (*x);
	yabs = fabs (*y);
	w = MAX (xabs, yabs);
	z__ = MIN (xabs, yabs);
	if (z__ == 0.) {
		ret_val = w;
	} else {
		/* Computing 2nd power */
		d__1 = z__ / w;
		ret_val = w * sqrt (d__1 * d__1 + 1.);
	}
	return ret_val;
} /* NUMlapack_dlapy2 */
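/*
	Illustration only (added sketch): NUMlapack_dlapy2 returns
	sqrt (x*x + y*y) without intermediate overflow or underflow, by factoring
	out the larger magnitude before squaring.

		double x = 3.0e300, y = 4.0e300;
		double h = NUMlapack_dlapy2 (&x, &y);
		// h == 5.0e300, whereas sqrt (x*x + y*y) would overflow to inf
*/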
#define work_ref(a_1,a_2) work[(a_2)*work_dim1 + a_1]
#define v_ref(a_1,a_2) v[(a_2)*v_dim1 + a_1]
int NUMlapack_dlarfb (const char *side, const char *trans, const char *direct, const char *storev, integer *m, integer *n, integer *k, double *v,
	integer *ldv, double *t, integer *ldt, double *c__, integer *ldc, double *work, integer *ldwork) {
	/* Table of constant values */
	static integer c__1 = 1;
	static double c_b14 = 1.;
	static double c_b25 = -1.;
	/* System generated locals */
	integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset;
	integer work_dim1, work_offset, i__1, i__2;
	/* Local variables */
	static integer i__, j;
	static char transt[1];
	v_dim1 = *ldv;
	v_offset = 1 + v_dim1 * 1;
	v -= v_offset;
	t_dim1 = *ldt;
	t_offset = 1 + t_dim1 * 1;
	t -= t_offset;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	work_dim1 = *ldwork;
	work_offset = 1 + work_dim1 * 1;
	work -= work_offset;
	/* Function Body */
	if (*m <= 0 || *n <= 0) {
		return 0;
	}
	if (lsame_ (trans, "N")) {
		* (unsigned char *) transt = 'T';
	} else {
		* (unsigned char *) transt = 'N';
	}
	if (lsame_ (storev, "C")) {
		if (lsame_ (direct, "F")) {
			/* Let V = ( V1 )    (first K rows)
			           ( V2 )
			   where V1 is unit lower triangular. */
			if (lsame_ (side, "L")) {
				/* Form H * C or H' * C where C = ( C1 )
				                                  ( C2 )
				   W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
				   W := C1' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (n, &c___ref (j, 1), ldc, &work_ref (1, j), &c__1);
					/* L10: */
				}
				/* W := W * V1 */
				NUMblas_dtrmm ("Right", "Lower", "No transpose", "Unit", n, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				if (*m > *k) {
					/* W := W + C2'*V2 */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "No transpose", n, k, &i__1, &c_b14, &c___ref (*k + 1, 1), ldc,
						&v_ref (*k + 1, 1), ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T' or W * T */
				NUMblas_dtrmm ("Right", "Upper", transt, "Non-unit", n, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - V * W' */
				if (*m > *k) {
					/* C2 := C2 - V2 * W' */
					i__1 = *m - *k;
					NUMblas_dgemm ("No transpose", "Transpose", &i__1, n, k, &c_b25, &v_ref (*k + 1, 1), ldv,
						&work[work_offset], ldwork, &c_b14, &c___ref (*k + 1, 1), ldc);
				}
				/* W := W * V1' */
				NUMblas_dtrmm ("Right", "Lower", "Transpose", "Unit", n, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				/* C1 := C1 - W' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *n;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (j, i__) = c___ref (j, i__) - work_ref (i__, j);
						/* L20: */
					}
					/* L30: */
				}
			} else if (lsame_ (side, "R")) {
				/* Form C * H or C * H' where C = ( C1 C2 )
				   W := C * V = (C1*V1 + C2*V2) (stored in WORK)
				   W := C1 */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (m, &c___ref (1, j), &c__1, &work_ref (1, j), &c__1);
					/* L40: */
				}
				/* W := W * V1 */
				NUMblas_dtrmm ("Right", "Lower", "No transpose", "Unit", m, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				if (*n > *k) {
					/* W := W + C2 * V2 */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "No transpose", m, k, &i__1, &c_b14, &c___ref (1, *k + 1), ldc,
						&v_ref (*k + 1, 1), ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T or W * T' */
				NUMblas_dtrmm ("Right", "Upper", trans, "Non-unit", m, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - W * V' */
				if (*n > *k) {
					/* C2 := C2 - W * V2' */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "Transpose", m, &i__1, k, &c_b25, &work[work_offset], ldwork,
						&v_ref (*k + 1, 1), ldv, &c_b14, &c___ref (1, *k + 1), ldc);
				}
				/* W := W * V1' */
				NUMblas_dtrmm ("Right", "Lower", "Transpose", "Unit", m, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				/* C1 := C1 - W */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *m;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (i__, j) = c___ref (i__, j) - work_ref (i__, j);
						/* L50: */
					}
					/* L60: */
				}
			}
		} else {
			/* Let V = ( V1 )
			           ( V2 )    (last K rows)
			   where V2 is unit upper triangular. */
			if (lsame_ (side, "L")) {
				/* Form H * C or H' * C where C = ( C1 )
				                                  ( C2 )
				   W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
				   W := C2' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (n, &c___ref (*m - *k + j, 1), ldc, &work_ref (1, j), &c__1);
					/* L70: */
				}
				/* W := W * V2 */
				NUMblas_dtrmm ("Right", "Upper", "No transpose", "Unit", n, k, &c_b14, &v_ref (*m - *k + 1, 1), ldv,
					&work[work_offset], ldwork);
				if (*m > *k) {
					/* W := W + C1'*V1 */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "No transpose", n, k, &i__1, &c_b14, &c__[c_offset], ldc,
						&v[v_offset], ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T' or W * T */
				NUMblas_dtrmm ("Right", "Lower", transt, "Non-unit", n, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - V * W' */
				if (*m > *k) {
					/* C1 := C1 - V1 * W' */
					i__1 = *m - *k;
					NUMblas_dgemm ("No transpose", "Transpose", &i__1, n, k, &c_b25, &v[v_offset], ldv,
						&work[work_offset], ldwork, &c_b14, &c__[c_offset], ldc);
				}
				/* W := W * V2' */
				NUMblas_dtrmm ("Right", "Upper", "Transpose", "Unit", n, k, &c_b14, &v_ref (*m - *k + 1, 1), ldv,
					&work[work_offset], ldwork);
				/* C2 := C2 - W' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *n;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (*m - *k + j, i__) = c___ref (*m - *k + j, i__) - work_ref (i__, j);
						/* L80: */
					}
					/* L90: */
				}
			} else if (lsame_ (side, "R")) {
				/* Form C * H or C * H' where C = ( C1 C2 )
				   W := C * V = (C1*V1 + C2*V2) (stored in WORK)
				   W := C2 */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (m, &c___ref (1, *n - *k + j), &c__1, &work_ref (1, j), &c__1);
					/* L100: */
				}
				/* W := W * V2 */
				NUMblas_dtrmm ("Right", "Upper", "No transpose", "Unit", m, k, &c_b14, &v_ref (*n - *k + 1, 1), ldv,
					&work[work_offset], ldwork);
				if (*n > *k) {
					/* W := W + C1 * V1 */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "No transpose", m, k, &i__1, &c_b14, &c__[c_offset], ldc,
						&v[v_offset], ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T or W * T' */
				NUMblas_dtrmm ("Right", "Lower", trans, "Non-unit", m, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - W * V' */
				if (*n > *k) {
					/* C1 := C1 - W * V1' */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "Transpose", m, &i__1, k, &c_b25, &work[work_offset], ldwork,
						&v[v_offset], ldv, &c_b14, &c__[c_offset], ldc);
				}
				/* W := W * V2' */
				NUMblas_dtrmm ("Right", "Upper", "Transpose", "Unit", m, k, &c_b14, &v_ref (*n - *k + 1, 1), ldv,
					&work[work_offset], ldwork);
				/* C2 := C2 - W */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *m;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (i__, *n - *k + j) = c___ref (i__, *n - *k + j) - work_ref (i__, j);
						/* L110: */
					}
					/* L120: */
				}
			}
		}
	} else if (lsame_ (storev, "R")) {
		if (lsame_ (direct, "F")) {
			/* Let V = ( V1 V2 )    (V1: first K columns)
			   where V1 is unit upper triangular. */
			if (lsame_ (side, "L")) {
				/* Form H * C or H' * C where C = ( C1 )
				                                  ( C2 )
				   W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
				   W := C1' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (n, &c___ref (j, 1), ldc, &work_ref (1, j), &c__1);
					/* L130: */
				}
				/* W := W * V1' */
				NUMblas_dtrmm ("Right", "Upper", "Transpose", "Unit", n, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				if (*m > *k) {
					/* W := W + C2'*V2' */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "Transpose", n, k, &i__1, &c_b14, &c___ref (*k + 1, 1), ldc,
						&v_ref (1, *k + 1), ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T' or W * T */
				NUMblas_dtrmm ("Right", "Upper", transt, "Non-unit", n, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - V' * W' */
				if (*m > *k) {
					/* C2 := C2 - V2' * W' */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "Transpose", &i__1, n, k, &c_b25, &v_ref (1, *k + 1), ldv,
						&work[work_offset], ldwork, &c_b14, &c___ref (*k + 1, 1), ldc);
				}
				/* W := W * V1 */
				NUMblas_dtrmm ("Right", "Upper", "No transpose", "Unit", n, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				/* C1 := C1 - W' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *n;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (j, i__) = c___ref (j, i__) - work_ref (i__, j);
						/* L140: */
					}
					/* L150: */
				}
			} else if (lsame_ (side, "R")) {
				/* Form C * H or C * H' where C = ( C1 C2 )
				   W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
				   W := C1 */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (m, &c___ref (1, j), &c__1, &work_ref (1, j), &c__1);
					/* L160: */
				}
				/* W := W * V1' */
				NUMblas_dtrmm ("Right", "Upper", "Transpose", "Unit", m, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				if (*n > *k) {
					/* W := W + C2 * V2' */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "Transpose", m, k, &i__1, &c_b14, &c___ref (1, *k + 1), ldc,
						&v_ref (1, *k + 1), ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T or W * T' */
				NUMblas_dtrmm ("Right", "Upper", trans, "Non-unit", m, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - W * V */
				if (*n > *k) {
					/* C2 := C2 - W * V2 */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "No transpose", m, &i__1, k, &c_b25, &work[work_offset], ldwork,
						&v_ref (1, *k + 1), ldv, &c_b14, &c___ref (1, *k + 1), ldc);
				}
				/* W := W * V1 */
				NUMblas_dtrmm ("Right", "Upper", "No transpose", "Unit", m, k, &c_b14, &v[v_offset], ldv,
					&work[work_offset], ldwork);
				/* C1 := C1 - W */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *m;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (i__, j) = c___ref (i__, j) - work_ref (i__, j);
						/* L170: */
					}
					/* L180: */
				}
			}
		} else {
			/* Let V = ( V1 V2 )    (V2: last K columns)
			   where V2 is unit lower triangular. */
			if (lsame_ (side, "L")) {
				/* Form H * C or H' * C where C = ( C1 )
				                                  ( C2 )
				   W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
				   W := C2' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (n, &c___ref (*m - *k + j, 1), ldc, &work_ref (1, j), &c__1);
					/* L190: */
				}
				/* W := W * V2' */
				NUMblas_dtrmm ("Right", "Lower", "Transpose", "Unit", n, k, &c_b14, &v_ref (1, *m - *k + 1), ldv,
					&work[work_offset], ldwork);
				if (*m > *k) {
					/* W := W + C1'*V1' */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "Transpose", n, k, &i__1, &c_b14, &c__[c_offset], ldc, &v[v_offset],
						ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T' or W * T */
				NUMblas_dtrmm ("Right", "Lower", transt, "Non-unit", n, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - V' * W' */
				if (*m > *k) {
					/* C1 := C1 - V1' * W' */
					i__1 = *m - *k;
					NUMblas_dgemm ("Transpose", "Transpose", &i__1, n, k, &c_b25, &v[v_offset], ldv,
						&work[work_offset], ldwork, &c_b14, &c__[c_offset], ldc);
				}
				/* W := W * V2 */
				NUMblas_dtrmm ("Right", "Lower", "No transpose", "Unit", n, k, &c_b14, &v_ref (1, *m - *k + 1), ldv,
					&work[work_offset], ldwork);
				/* C2 := C2 - W' */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *n;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (*m - *k + j, i__) = c___ref (*m - *k + j, i__) - work_ref (i__, j);
						/* L200: */
					}
					/* L210: */
				}
			} else if (lsame_ (side, "R")) {
				/* Form C * H or C * H' where C = ( C1 C2 )
				   W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
				   W := C2 */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					NUMblas_dcopy (m, &c___ref (1, *n - *k + j), &c__1, &work_ref (1, j), &c__1);
					/* L220: */
				}
				/* W := W * V2' */
				NUMblas_dtrmm ("Right", "Lower", "Transpose", "Unit", m, k, &c_b14, &v_ref (1, *n - *k + 1), ldv,
					&work[work_offset], ldwork);
				if (*n > *k) {
					/* W := W + C1 * V1' */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "Transpose", m, k, &i__1, &c_b14, &c__[c_offset], ldc,
						&v[v_offset], ldv, &c_b14, &work[work_offset], ldwork);
				}
				/* W := W * T or W * T' */
				NUMblas_dtrmm ("Right", "Lower", trans, "Non-unit", m, k, &c_b14, &t[t_offset], ldt,
					&work[work_offset], ldwork);
				/* C := C - W * V */
				if (*n > *k) {
					/* C1 := C1 - W * V1 */
					i__1 = *n - *k;
					NUMblas_dgemm ("No transpose", "No transpose", m, &i__1, k, &c_b25, &work[work_offset], ldwork,
						&v[v_offset], ldv, &c_b14, &c__[c_offset], ldc);
				}
				/* W := W * V2 */
				NUMblas_dtrmm ("Right", "Lower", "No transpose", "Unit", m, k, &c_b14, &v_ref (1, *n - *k + 1), ldv,
					&work[work_offset], ldwork);
				/* C1 := C1 - W */
				i__1 = *k;
				for (j = 1; j <= i__1; ++j) {
					i__2 = *m;
					for (i__ = 1; i__ <= i__2; ++i__) {
						c___ref (i__, *n - *k + j) = c___ref (i__, *n - *k + j) - work_ref (i__, j);
						/* L230: */
					}
					/* L240: */
				}
			}
		}
	}
	return 0;
} /* NUMlapack_dlarfb */
#undef v_ref
#undef work_ref
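/*
	Illustration only (added sketch): NUMlapack_dlarfb applies a blocked
	reflector H = I - V T V' (V holding the reflector vectors, T the triangular
	factor from NUMlapack_dlarft) to a matrix C. A schematic call from a blocked
	QR update, applying H' from the left with column-stored, forward V; all
	arrays and leading dimensions are assumed to be set up by the caller:

		NUMlapack_dlarfb ("Left", "Transpose", "Forward", "Columnwise",
			&m, &n, &k, v, &ldv, t, &ldt, c, &ldc, work, &ldwork);

	work must provide at least n-by-k doubles here (ldwork >= n).
*/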
int NUMlapack_dlarf (const char *side, integer *m, integer *n, double *v, integer *incv, double *tau, double *c__, integer *ldc,
	double *work) {
	/* Table of constant values */
	static double c_b4 = 1.;
	static double c_b5 = 0.;
	static integer c__1 = 1;
	/* System generated locals */
	integer c_dim1, c_offset;
	double d__1;
	/* Local variables */
	--v;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	if (lsame_ (side, "L")) {
		/* Form H * C */
		if (*tau != 0.) {
			/* w := C' * v */
			NUMblas_dgemv ("Transpose", m, n, &c_b4, &c__[c_offset], ldc, &v[1], incv, &c_b5, &work[1], &c__1);
			/* C := C - v * w' */
			d__1 = - (*tau);
			NUMblas_dger (m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset], ldc);
		}
	} else {
		/* Form C * H */
		if (*tau != 0.) {
			/* w := C * v */
			NUMblas_dgemv ("No transpose", m, n, &c_b4, &c__[c_offset], ldc, &v[1], incv, &c_b5, &work[1], &c__1);
			/* C := C - w * v' */
			d__1 = - (*tau);
			NUMblas_dger (m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset], ldc);
		}
	}
	return 0;
} /* NUMlapack_dlarf */
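/*
	Illustration only (added sketch): NUMlapack_dlarf applies a single
	elementary reflector H = I - tau * v * v' to C. With v = (1, 0)' and
	tau = 2, H = diag (-1, 1), so the first row of C changes sign:

		integer m = 2, n = 2, ldc = 2, incv = 1;
		double v [2] = { 1., 0. }, tau = 2., work [2];
		double c [4] = { 1, 2, 3, 4 };
		NUMlapack_dlarf ("L", &m, &n, v, &incv, &tau, c, &ldc, work);
		// c is now { -1, 2, -3, 4 }
*/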
int NUMlapack_dlarfg (integer *n, double *alpha, double *x, integer *incx, double *tau) {
	/* System generated locals */
	integer i__1;
	double d__1;
	/* Local variables */
	static double beta;
	static integer j;
	static double xnorm;
	static double safmin, rsafmn;
	static integer knt;
	--x;
	/* Function Body */
	if (*n <= 1) {
		*tau = 0.;
		return 0;
	}
	i__1 = *n - 1;
	xnorm = NUMblas_dnrm2 (&i__1, &x[1], incx);
	if (xnorm == 0.) {
		/* H = I */
		*tau = 0.;
	} else {
		/* general case */
		d__1 = NUMlapack_dlapy2 (alpha, &xnorm);
		beta = -d_sign (&d__1, alpha);
		safmin = NUMblas_dlamch ("S") / NUMblas_dlamch ("E");
		if (fabs (beta) < safmin) {
			/* XNORM, BETA may be inaccurate; scale X and recompute them */
			rsafmn = 1. / safmin;
			knt = 0;
L10:
			++knt;
			i__1 = *n - 1;
			NUMblas_dscal (&i__1, &rsafmn, &x[1], incx);
			beta *= rsafmn;
			*alpha *= rsafmn;
			if (fabs (beta) < safmin) {
				goto L10;
			}
			/* New BETA is at most 1, at least SAFMIN */
			i__1 = *n - 1;
			xnorm = NUMblas_dnrm2 (&i__1, &x[1], incx);
			d__1 = NUMlapack_dlapy2 (alpha, &xnorm);
			beta = -d_sign (&d__1, alpha);
			*tau = (beta - *alpha) / beta;
			i__1 = *n - 1;
			d__1 = 1. / (*alpha - beta);
			NUMblas_dscal (&i__1, &d__1, &x[1], incx);
			/* If ALPHA is subnormal, it may lose relative accuracy */
			*alpha = beta;
			i__1 = knt;
			for (j = 1; j <= i__1; ++j) {
				*alpha *= safmin;
				/* L20: */
			}
		} else {
			*tau = (beta - *alpha) / beta;
			i__1 = *n - 1;
			d__1 = 1. / (*alpha - beta);
			NUMblas_dscal (&i__1, &d__1, &x[1], incx);
			*alpha = beta;
		}
	}
	return 0;
} /* NUMlapack_dlarfg */
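/*
	Illustration only (added sketch): NUMlapack_dlarfg generates the reflector
	that annihilates x below alpha. On exit alpha holds beta, with
	H (alpha, x)' = (beta, 0)', and x holds v(2:n) (v(1) = 1 is implicit).

		integer n = 2, incx = 1;
		double alpha = 3., x [1] = { 4. }, tau;
		NUMlapack_dlarfg (&n, &alpha, x, &incx, &tau);
		// alpha == -5 (beta), tau == 1.6, x [0] == 0.5
*/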
#define t_ref(a_1,a_2) t[(a_2)*t_dim1 + a_1]
#define v_ref(a_1,a_2) v[(a_2)*v_dim1 + a_1]
int NUMlapack_dlarft (const char *direct, const char *storev, integer *n, integer *k, double *v, integer *ldv, double *tau,
	double *t, integer *ldt) {
	/* Table of constant values */
	static integer c__1 = 1;
	static double c_b8 = 0.;
	/* System generated locals */
	integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
	double d__1;
	/* Local variables */
	static integer i__, j;
	static double vii;
	v_dim1 = *ldv;
	v_offset = 1 + v_dim1 * 1;
	v -= v_offset;
	--tau;
	t_dim1 = *ldt;
	t_offset = 1 + t_dim1 * 1;
	t -= t_offset;
	/* Function Body */
	if (*n == 0) {
		return 0;
	}
	if (lsame_ (direct, "F")) {
		i__1 = *k;
		for (i__ = 1; i__ <= i__1; ++i__) {
			if (tau[i__] == 0.) {
				/* H(i) = I */
				i__2 = i__;
				for (j = 1; j <= i__2; ++j) {
					t_ref (j, i__) = 0.;
					/* L10: */
				}
			} else {
				/* general case */
				vii = v_ref (i__, i__);
				v_ref (i__, i__) = 1.;
				if (lsame_ (storev, "C")) {
					/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
					i__2 = *n - i__ + 1;
					i__3 = i__ - 1;
					d__1 = -tau[i__];
					NUMblas_dgemv ("Transpose", &i__2, &i__3, &d__1, &v_ref (i__, 1), ldv, &v_ref (i__, i__), &c__1,
						&c_b8, &t_ref (1, i__), &c__1);
				} else {
					/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
					i__2 = i__ - 1;
					i__3 = *n - i__ + 1;
					d__1 = -tau[i__];
					NUMblas_dgemv ("No transpose", &i__2, &i__3, &d__1, &v_ref (1, i__), ldv, &v_ref (i__, i__), ldv,
						&c_b8, &t_ref (1, i__), &c__1);
				}
				v_ref (i__, i__) = vii;
				/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
				i__2 = i__ - 1;
				NUMblas_dtrmv ("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, &t_ref (1, i__),
					&c__1);
				t_ref (i__, i__) = tau[i__];
			}
			/* L20: */
		}
	} else {
		for (i__ = *k; i__ >= 1; --i__) {
			if (tau[i__] == 0.) {
				/* H(i) = I */
				i__1 = *k;
				for (j = i__; j <= i__1; ++j) {
					t_ref (j, i__) = 0.;
					/* L30: */
				}
			} else {
				/* general case */
				if (i__ < *k) {
					if (lsame_ (storev, "C")) {
						vii = v_ref (*n - *k + i__, i__);
						v_ref (*n - *k + i__, i__) = 1.;
						/* T(i+1:k,i) := - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) */
						i__1 = *n - *k + i__;
						i__2 = *k - i__;
						d__1 = -tau[i__];
						NUMblas_dgemv ("Transpose", &i__1, &i__2, &d__1, &v_ref (1, i__ + 1), ldv, &v_ref (1, i__),
							&c__1, &c_b8, &t_ref (i__ + 1, i__), &c__1);
						v_ref (*n - *k + i__, i__) = vii;
					} else {
						vii = v_ref (i__, *n - *k + i__);
						v_ref (i__, *n - *k + i__) = 1.;
						/* T(i+1:k,i) := - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' */
						i__1 = *k - i__;
						i__2 = *n - *k + i__;
						d__1 = -tau[i__];
						NUMblas_dgemv ("No transpose", &i__1, &i__2, &d__1, &v_ref (i__ + 1, 1), ldv, &v_ref (i__,
							1), ldv, &c_b8, &t_ref (i__ + 1, i__), &c__1);
						v_ref (i__, *n - *k + i__) = vii;
					}
					/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
					i__1 = *k - i__;
					NUMblas_dtrmv ("Lower", "No transpose", "Non-unit", &i__1, &t_ref (i__ + 1, i__ + 1), ldt,
						&t_ref (i__ + 1, i__), &c__1);
				}
				t_ref (i__, i__) = tau[i__];
			}
			/* L40: */
		}
	}
	return 0;
} /* NUMlapack_dlarft */
#undef v_ref
#undef t_ref
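/*
	Illustration only (added sketch): NUMlapack_dlarft builds the k-by-k
	triangular factor T of a block reflector H = H(1) H(2) ... H(k)
	= I - V T V', from the reflector vectors in V and the tau scalars produced
	by NUMlapack_dlarfg. For a single reflector (k = 1), T is just tau:

		integer n = 2, k = 1, ldv = 2, ldt = 1;
		double v [2] = { 1., .5 };   // v(1) = 1, as NUMlapack_dlarfg implies
		double tau [1] = { 1.6 }, t [1];
		NUMlapack_dlarft ("Forward", "Columnwise", &n, &k, v, &ldv, tau, t, &ldt);
		// t [0] == 1.6
*/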
int NUMlapack_dlartg (double *f, double *g, double *cs, double *sn, double *r__) {
	/* Initialized data */
	static integer first = TRUE;
	/* System generated locals */
	integer i__1;
	double d__1, d__2;
	/* Local variables */
	static integer i__;
	static double scale;
	static integer count;
	static double f1, g1, safmn2, safmx2;
	static double safmin, eps;
	if (first) {
		first = FALSE;
		safmin = NUMblas_dlamch ("S");
		eps = NUMblas_dlamch ("E");
		d__1 = NUMblas_dlamch ("B");
		i__1 = (integer) (log (safmin / eps) / log (NUMblas_dlamch ("B")) / 2.);
		safmn2 = pow_di (&d__1, &i__1);
		safmx2 = 1. / safmn2;
	}
	if (*g == 0.) {
		*cs = 1.;
		*sn = 0.;
		*r__ = *f;
	} else if (*f == 0.) {
		*cs = 0.;
		*sn = 1.;
		*r__ = *g;
	} else {
		f1 = *f;
		g1 = *g;
		/* Computing MAX */
		d__1 = fabs (f1), d__2 = fabs (g1);
		scale = MAX (d__1, d__2);
		if (scale >= safmx2) {
			count = 0;
L10:
			++count;
			f1 *= safmn2;
			g1 *= safmn2;
			/* Computing MAX */
			d__1 = fabs (f1), d__2 = fabs (g1);
			scale = MAX (d__1, d__2);
			if (scale >= safmx2) {
				goto L10;
			}
			/* Computing 2nd power */
			d__1 = f1;
			/* Computing 2nd power */
			d__2 = g1;
			*r__ = sqrt (d__1 * d__1 + d__2 * d__2);
			*cs = f1 / *r__;
			*sn = g1 / *r__;
			i__1 = count;
			for (i__ = 1; i__ <= i__1; ++i__) {
				*r__ *= safmx2;
				/* L20: */
			}
		} else if (scale <= safmn2) {
			count = 0;
L30:
			++count;
			f1 *= safmx2;
			g1 *= safmx2;
			/* Computing MAX */
			d__1 = fabs (f1), d__2 = fabs (g1);
			scale = MAX (d__1, d__2);
			if (scale <= safmn2) {
				goto L30;
			}
			/* Computing 2nd power */
			d__1 = f1;
			/* Computing 2nd power */
			d__2 = g1;
			*r__ = sqrt (d__1 * d__1 + d__2 * d__2);
			*cs = f1 / *r__;
			*sn = g1 / *r__;
			i__1 = count;
			for (i__ = 1; i__ <= i__1; ++i__) {
				*r__ *= safmn2;
				/* L40: */
			}
		} else {
			/* Computing 2nd power */
			d__1 = f1;
			/* Computing 2nd power */
			d__2 = g1;
			*r__ = sqrt (d__1 * d__1 + d__2 * d__2);
			*cs = f1 / *r__;
			*sn = g1 / *r__;
		}
		if (fabs (*f) > fabs (*g) && *cs < 0.) {
			*cs = - (*cs);
			*sn = - (*sn);
			*r__ = - (*r__);
		}
	}
	return 0;
} /* NUMlapack_dlartg */
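/*
	Illustration only (added sketch): NUMlapack_dlartg generates a plane
	rotation with cs*f + sn*g = r and -sn*f + cs*g = 0, rescaling internally
	when f or g is near the overflow or underflow threshold.

		double f = 3., g = 4., cs, sn, r;
		NUMlapack_dlartg (&f, &g, &cs, &sn, &r);
		// cs == 0.6, sn == 0.8, r == 5
*/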
int NUMlapack_dlarfx (const char *side, integer *m, integer *n, double *v, double *tau, double *c__, integer *ldc,
	double *work) {
	/* Table of constant values */
	static double c_b14 = 1.;
	static integer c__1 = 1;
	static double c_b16 = 0.;
	/* System generated locals */
	integer c_dim1, c_offset, i__1;
	double d__1;
	/* Local variables */
	static integer j;
	static double t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, v8, v9, t10, v10, sum;
	--v;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	if (*tau == 0.) {
		return 0;
	}
	if (lsame_ (side, "L")) {
		/* Form H * C, where H has order m. */
		switch (*m) {
			case 1: goto L10;
			case 2: goto L30;
			case 3: goto L50;
			case 4: goto L70;
			case 5: goto L90;
			case 6: goto L110;
			case 7: goto L130;
			case 8: goto L150;
			case 9: goto L170;
			case 10: goto L190;
		}
		/* Code for general M
		   w := C'*v */
		NUMblas_dgemv ("Transpose", m, n, &c_b14, &c__[c_offset], ldc, &v[1], &c__1, &c_b16, &work[1],
			&c__1);
		/* C := C - tau * v * w' */
		d__1 = - (*tau);
		NUMblas_dger (m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc);
		goto L410;
L10:
		/* Special code for 1 x 1 Householder */
		t1 = 1. - *tau * v[1] * v[1];
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			c___ref (1, j) = t1 * c___ref (1, j);
			/* L20: */
		}
		goto L410;
L30:
		/* Special code for 2 x 2 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			/* L40: */
		}
		goto L410;
L50:
		/* Special code for 3 x 3 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			/* L60: */
		}
		goto L410;
L70:
		/* Special code for 4 x 4 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			/* L80: */
		}
		goto L410;
L90:
		/* Special code for 5 x 5 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			/* L100: */
		}
		goto L410;
L110:
		/* Special code for 6 x 6 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j) + v6 * c___ref (6, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			c___ref (6, j) = c___ref (6, j) - sum * t6;
			/* L120: */
		}
		goto L410;
L130:
		/* Special code for 7 x 7 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j) + v6 * c___ref (6, j) + v7 * c___ref (7, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			c___ref (6, j) = c___ref (6, j) - sum * t6;
			c___ref (7, j) = c___ref (7, j) - sum * t7;
			/* L140: */
		}
		goto L410;
L150:
		/* Special code for 8 x 8 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j) + v6 * c___ref (6, j) + v7 * c___ref (7, j) + v8 * c___ref (8, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			c___ref (6, j) = c___ref (6, j) - sum * t6;
			c___ref (7, j) = c___ref (7, j) - sum * t7;
			c___ref (8, j) = c___ref (8, j) - sum * t8;
			/* L160: */
		}
		goto L410;
L170:
		/* Special code for 9 x 9 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		v9 = v[9];
		t9 = *tau * v9;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j) + v6 * c___ref (6, j) + v7 * c___ref (7, j) + v8 * c___ref (8, j) + v9 * c___ref (9, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			c___ref (6, j) = c___ref (6, j) - sum * t6;
			c___ref (7, j) = c___ref (7, j) - sum * t7;
			c___ref (8, j) = c___ref (8, j) - sum * t8;
			c___ref (9, j) = c___ref (9, j) - sum * t9;
			/* L180: */
		}
		goto L410;
L190:
		/* Special code for 10 x 10 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		v9 = v[9];
		t9 = *tau * v9;
		v10 = v[10];
		t10 = *tau * v10;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (1, j) + v2 * c___ref (2, j) + v3 * c___ref (3, j) + v4 * c___ref (4, j) + v5 * c___ref (5, j) + v6 * c___ref (6, j) + v7 * c___ref (7, j) + v8 * c___ref (8, j) + v9 * c___ref (9, j) + v10 * c___ref (10, j);
			c___ref (1, j) = c___ref (1, j) - sum * t1;
			c___ref (2, j) = c___ref (2, j) - sum * t2;
			c___ref (3, j) = c___ref (3, j) - sum * t3;
			c___ref (4, j) = c___ref (4, j) - sum * t4;
			c___ref (5, j) = c___ref (5, j) - sum * t5;
			c___ref (6, j) = c___ref (6, j) - sum * t6;
			c___ref (7, j) = c___ref (7, j) - sum * t7;
			c___ref (8, j) = c___ref (8, j) - sum * t8;
			c___ref (9, j) = c___ref (9, j) - sum * t9;
			c___ref (10, j) = c___ref (10, j) - sum * t10;
			/* L200: */
		}
		goto L410;
	} else {
		/* Form C * H, where H has order n. */
		switch (*n) {
			case 1: goto L210;
			case 2: goto L230;
			case 3: goto L250;
			case 4: goto L270;
			case 5: goto L290;
			case 6: goto L310;
			case 7: goto L330;
			case 8: goto L350;
			case 9: goto L370;
			case 10: goto L390;
		}
		/* Code for general N
		   w := C * v */
		NUMblas_dgemv ("No transpose", m, n, &c_b14, &c__[c_offset], ldc, &v[1], &c__1, &c_b16, &work[1],
			&c__1);
		/* C := C - tau * w * v' */
		d__1 = - (*tau);
		NUMblas_dger (m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc);
		goto L410;
L210:
		/* Special code for 1 x 1 Householder */
		t1 = 1. - *tau * v[1] * v[1];
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			c___ref (j, 1) = t1 * c___ref (j, 1);
			/* L220: */
		}
		goto L410;
L230:
		/* Special code for 2 x 2 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			/* L240: */
		}
		goto L410;
L250:
		/* Special code for 3 x 3 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			/* L260: */
		}
		goto L410;
L270:
		/* Special code for 4 x 4 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			/* L280: */
		}
		goto L410;
L290:
		/* Special code for 5 x 5 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			/* L300: */
		}
		goto L410;
L310:
		/* Special code for 6 x 6 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5) + v6 * c___ref (j, 6);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			c___ref (j, 6) = c___ref (j, 6) - sum * t6;
			/* L320: */
		}
		goto L410;
L330:
		/* Special code for 7 x 7 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5) + v6 * c___ref (j, 6) + v7 * c___ref (j, 7);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			c___ref (j, 6) = c___ref (j, 6) - sum * t6;
			c___ref (j, 7) = c___ref (j, 7) - sum * t7;
			/* L340: */
		}
		goto L410;
L350:
		/* Special code for 8 x 8 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5) + v6 * c___ref (j, 6) + v7 * c___ref (j, 7) + v8 * c___ref (j, 8);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			c___ref (j, 6) = c___ref (j, 6) - sum * t6;
			c___ref (j, 7) = c___ref (j, 7) - sum * t7;
			c___ref (j, 8) = c___ref (j, 8) - sum * t8;
			/* L360: */
		}
		goto L410;
L370:
		/* Special code for 9 x 9 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		v9 = v[9];
		t9 = *tau * v9;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5) + v6 * c___ref (j, 6) + v7 * c___ref (j, 7) + v8 * c___ref (j, 8) + v9 * c___ref (j, 9);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			c___ref (j, 6) = c___ref (j, 6) - sum * t6;
			c___ref (j, 7) = c___ref (j, 7) - sum * t7;
			c___ref (j, 8) = c___ref (j, 8) - sum * t8;
			c___ref (j, 9) = c___ref (j, 9) - sum * t9;
			/* L380: */
		}
		goto L410;
L390:
		/* Special code for 10 x 10 Householder */
		v1 = v[1];
		t1 = *tau * v1;
		v2 = v[2];
		t2 = *tau * v2;
		v3 = v[3];
		t3 = *tau * v3;
		v4 = v[4];
		t4 = *tau * v4;
		v5 = v[5];
		t5 = *tau * v5;
		v6 = v[6];
		t6 = *tau * v6;
		v7 = v[7];
		t7 = *tau * v7;
		v8 = v[8];
		t8 = *tau * v8;
		v9 = v[9];
		t9 = *tau * v9;
		v10 = v[10];
		t10 = *tau * v10;
		i__1 = *m;
		for (j = 1; j <= i__1; ++j) {
			sum = v1 * c___ref (j, 1) + v2 * c___ref (j, 2) + v3 * c___ref (j, 3) + v4 * c___ref (j, 4) + v5 * c___ref (j, 5) + v6 * c___ref (j, 6) + v7 * c___ref (j, 7) + v8 * c___ref (j, 8) + v9 * c___ref (j, 9) + v10 * c___ref (j, 10);
			c___ref (j, 1) = c___ref (j, 1) - sum * t1;
			c___ref (j, 2) = c___ref (j, 2) - sum * t2;
			c___ref (j, 3) = c___ref (j, 3) - sum * t3;
			c___ref (j, 4) = c___ref (j, 4) - sum * t4;
			c___ref (j, 5) = c___ref (j, 5) - sum * t5;
			c___ref (j, 6) = c___ref (j, 6) - sum * t6;
			c___ref (j, 7) = c___ref (j, 7) - sum * t7;
			c___ref (j, 8) = c___ref (j, 8) - sum * t8;
			c___ref (j, 9) = c___ref (j, 9) - sum * t9;
			c___ref (j, 10) = c___ref (j, 10) - sum * t10;
			/* L400: */
		}
		goto L410;
	}
L410:
	return 0;
} /* NUMlapack_dlarfx */
int NUMlapack_dlas2 (double *f, double *g, double *h__, double *ssmin, double *ssmax) {
    /* System generated locals */
    double d__1, d__2;
    /* Local variables */
    static double fhmn, fhmx, c__, fa, ga, ha, as, at, au;

    fa = fabs (*f);
    ga = fabs (*g);
    ha = fabs (*h__);
    fhmn = MIN (fa, ha);
    fhmx = MAX (fa, ha);
    if (fhmn == 0.) {
        *ssmin = 0.;
        if (fhmx == 0.) {
            *ssmax = ga;
        } else {
            /* Computing 2nd power */
            d__1 = MIN (fhmx, ga) / MAX (fhmx, ga);
            *ssmax = MAX (fhmx, ga) * sqrt (d__1 * d__1 + 1.);
        }
    } else {
        if (ga < fhmx) {
            as = fhmn / fhmx + 1.;
            at = (fhmx - fhmn) / fhmx;
            /* Computing 2nd power */
            d__1 = ga / fhmx;
            au = d__1 * d__1;
            c__ = 2. / (sqrt (as * as + au) + sqrt (at * at + au));
            *ssmin = fhmn * c__;
            *ssmax = fhmx / c__;
        } else {
            au = fhmx / ga;
            if (au == 0.) {
                /* Avoid possible harmful underflow if the exponent range is
                   asymmetric (the true SSMIN may not underflow even if AU underflows). */
                *ssmin = fhmn * fhmx / ga;
                *ssmax = ga;
            } else {
                as = fhmn / fhmx + 1.;
                at = (fhmx - fhmn) / fhmx;
                /* Computing 2nd power */
                d__1 = as * au;
                /* Computing 2nd power */
                d__2 = at * au;
                c__ = 1. / (sqrt (d__1 * d__1 + 1.) + sqrt (d__2 * d__2 + 1.));
                *ssmin = fhmn * c__ * au;
                *ssmin += *ssmin;
                *ssmax = ga / (c__ + c__);
            }
        }
    }
    return 0;
} /* NUMlapack_dlas2 */
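/* Editor's illustration (not part of the original LAPACK translation): a minimal
   usage sketch for NUMlapack_dlas2, which returns the singular values of the
   2-by-2 triangular matrix [ f g; 0 h ]. The test matrix is hypothetical. */
#if 0
static void example_dlas2 (void) {
    double f = 3.0, g = 4.0, h = 0.0, ssmin, ssmax;
    NUMlapack_dlas2 (&f, &g, &h, &ssmin, &ssmax);
    /* [ 3 4; 0 0 ] has singular values 5 and 0, so ssmax == 5 and ssmin == 0. */
}
#endif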
int NUMlapack_dlascl (const char *type__, integer *kl, integer *ku, double *cfrom, double *cto, integer *m,
    integer *n, double *a, integer *lda, integer *info) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
    /* Local variables */
    static integer done;
    static double ctoc;
    static integer i__, j;
    static integer itype, k1, k2, k3, k4;
    static double cfrom1;
    static double cfromc;
    static double bignum, smlnum, mul, cto1;

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    /* Function Body */
    *info = 0;
    if (lsame_ (type__, "G")) {
        itype = 0;
    } else if (lsame_ (type__, "L")) {
        itype = 1;
    } else if (lsame_ (type__, "U")) {
        itype = 2;
    } else if (lsame_ (type__, "H")) {
        itype = 3;
    } else if (lsame_ (type__, "B")) {
        itype = 4;
    } else if (lsame_ (type__, "Q")) {
        itype = 5;
    } else if (lsame_ (type__, "Z")) {
        itype = 6;
    } else {
        itype = -1;
    }
    if (itype == -1) {
        *info = -1;
    } else if (*cfrom == 0.) {
        *info = -4;
    } else if (*m < 0) {
        *info = -6;
    } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m)) {
        *info = -7;
    } else if (itype <= 3 && *lda < MAX (1, *m)) {
        *info = -9;
    } else if (itype >= 4) {
        /* Computing MAX */
        i__1 = *m - 1;
        if (*kl < 0 || *kl > MAX (i__1, 0)) {
            *info = -2;
        } else { /* if(complicated condition) */
            /* Computing MAX */
            i__1 = *n - 1;
            if (*ku < 0 || *ku > MAX (i__1, 0) || ((itype == 4 || itype == 5) && *kl != *ku)) {
                *info = -3;
            } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda < *ku + 1) ||
                (itype == 6 && *lda < (*kl << 1) + *ku + 1)) {
                *info = -9;
            }
        }
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DLASCL", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*n == 0 || *m == 0) {
        return 0;
    }
    /* Get machine parameters */
    smlnum = NUMblas_dlamch ("S");
    bignum = 1. / smlnum;
    cfromc = *cfrom;
    ctoc = *cto;
L10:
    cfrom1 = cfromc * smlnum;
    cto1 = ctoc / bignum;
    if (fabs (cfrom1) > fabs (ctoc) && ctoc != 0.) {
        mul = smlnum;
        done = FALSE;
        cfromc = cfrom1;
    } else if (fabs (cto1) > fabs (cfromc)) {
        mul = bignum;
        done = FALSE;
        ctoc = cto1;
    } else {
        mul = ctoc / cfromc;
        done = TRUE;
    }
    if (itype == 0) {
        /* Full matrix */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L20: */
            }
            /* L30: */
        }
    } else if (itype == 1) {
        /* Lower triangular matrix */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = j; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L40: */
            }
            /* L50: */
        }
    } else if (itype == 2) {
        /* Upper triangular matrix */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            i__2 = MIN (j, *m);
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L60: */
            }
            /* L70: */
        }
    } else if (itype == 3) {
        /* Upper Hessenberg matrix */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            /* Computing MIN */
            i__3 = j + 1;
            i__2 = MIN (i__3, *m);
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L80: */
            }
            /* L90: */
        }
    } else if (itype == 4) {
        /* Lower half of a symmetric band matrix */
        k3 = *kl + 1;
        k4 = *n + 1;
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            /* Computing MIN */
            i__3 = k3, i__4 = k4 - j;
            i__2 = MIN (i__3, i__4);
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L100: */
            }
            /* L110: */
        }
    } else if (itype == 5) {
        /* Upper half of a symmetric band matrix */
        k1 = *ku + 2;
        k3 = *ku + 1;
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            /* Computing MAX */
            i__2 = k1 - j;
            i__3 = k3;
            for (i__ = MAX (i__2, 1); i__ <= i__3; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L120: */
            }
            /* L130: */
        }
    } else if (itype == 6) {
        /* Band matrix */
        k1 = *kl + *ku + 2;
        k2 = *kl + 1;
        k3 = (*kl << 1) + *ku + 1;
        k4 = *kl + *ku + 1 + *m;
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            /* Computing MAX */
            i__3 = k1 - j;
            /* Computing MIN */
            i__4 = k3, i__5 = k4 - j;
            i__2 = MIN (i__4, i__5);
            for (i__ = MAX (i__3, k2); i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j) * mul;
                /* L140: */
            }
            /* L150: */
        }
    }
    if (!done) {
        goto L10;
    }
    return 0;
} /* NUMlapack_dlascl */
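/* Editor's illustration (not part of the original LAPACK translation): a sketch
   of a NUMlapack_dlascl call that scales a full 2-by-2 matrix by cto/cfrom,
   looping with intermediate safe factors when the direct quotient would
   overflow or underflow; the data are hypothetical. */
#if 0
static void example_dlascl (void) {
    double a[4] = { 1.0, 2.0, 3.0, 4.0 };   /* 2-by-2, column-major */
    double cfrom = 4.0, cto = 1.0;
    integer kl = 0, ku = 0, m = 2, n = 2, lda = 2, info;
    NUMlapack_dlascl ("G", &kl, &ku, &cfrom, &cto, &m, &n, a, &lda, &info);
    /* a is now { 0.25, 0.5, 0.75, 1.0 } and info == 0. */
}
#endif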
int NUMlapack_dlaset (const char *uplo, integer *m, integer *n, double *alpha, double *beta, double *a, integer *lda) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, j;

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    /* Function Body */
    if (lsame_ (uplo, "U")) {
        /* Set the strictly upper triangular or trapezoidal part of the array to ALPHA. */
        i__1 = *n;
        for (j = 2; j <= i__1; ++j) {
            /* Computing MIN */
            i__3 = j - 1;
            i__2 = MIN (i__3, *m);
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = *alpha;
                /* L10: */
            }
            /* L20: */
        }
    } else if (lsame_ (uplo, "L")) {
        /* Set the strictly lower triangular or trapezoidal part of the array to ALPHA. */
        i__1 = MIN (*m, *n);
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = j + 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = *alpha;
                /* L30: */
            }
            /* L40: */
        }
    } else {
        /* Set the leading m-by-n submatrix to ALPHA. */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = *alpha;
                /* L50: */
            }
            /* L60: */
        }
    }
    /* Set the first MIN(M,N) diagonal elements to BETA. */
    i__1 = MIN (*m, *n);
    for (i__ = 1; i__ <= i__1; ++i__) {
        a_ref (i__, i__) = *beta;
        /* L70: */
    }
    return 0;
} /* NUMlapack_dlaset */
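/* Editor's illustration (not part of the original LAPACK translation): setting a
   3-by-3 array to the identity with NUMlapack_dlaset, i.e. ALPHA off the
   diagonal and BETA on it; the array is hypothetical. */
#if 0
static void example_dlaset (void) {
    double a[9];
    double alpha = 0.0, beta = 1.0;
    integer m = 3, n = 3, lda = 3;
    NUMlapack_dlaset ("Full", &m, &n, &alpha, &beta, a, &lda);   /* a = I */
}
#endif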
int NUMlapack_dlasq1 (integer *n, double *d__, double *e, double *work, integer *info) {
    /* System generated locals */
    integer i__1, i__2;
    double d__1, d__2, d__3;
    /* Local variables */
    static integer i__;
    static double scale;
    static integer iinfo;
    static double sigmn;
    static double sigmx;
    static double safmin;
    static double eps;

    /* Parameter adjustments */
    --work;
    --e;
    --d__;
    /* Function Body */
    *info = 0;
    if (*n < 0) {
        *info = -2;
        i__1 = - (*info);
        xerbla_ ("DLASQ1", &i__1);
        return 0;
    } else if (*n == 0) {
        return 0;
    } else if (*n == 1) {
        d__[1] = fabs (d__[1]);
        return 0;
    } else if (*n == 2) {
        NUMlapack_dlas2 (&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
        d__[1] = sigmx;
        d__[2] = sigmn;
        return 0;
    }
    /* Estimate the largest singular value. */
    sigmx = 0.;
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
        d__[i__] = (d__1 = d__[i__], fabs (d__1));
        /* Computing MAX */
        d__2 = sigmx, d__3 = (d__1 = e[i__], fabs (d__1));
        sigmx = MAX (d__2, d__3);
        /* L10: */
    }
    d__[*n] = (d__1 = d__[*n], fabs (d__1));
    /* Early return if SIGMX is zero (matrix is already diagonal). */
    if (sigmx == 0.) {
        NUMlapack_dlasrt ("D", n, &d__[1], &iinfo);
        return 0;
    }
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Computing MAX */
        d__1 = sigmx, d__2 = d__[i__];
        sigmx = MAX (d__1, d__2);
        /* L20: */
    }
    /* Copy D and E into WORK (in the Z format) and scale (squaring the input
       data makes scaling by a power of the radix pointless). */
    eps = NUMblas_dlamch ("Precision");
    safmin = NUMblas_dlamch ("Safe minimum");
    scale = sqrt (eps / safmin);
    NUMblas_dcopy (n, &d__[1], &c__1, &work[1], &c__2);
    i__1 = *n - 1;
    NUMblas_dcopy (&i__1, &e[1], &c__1, &work[2], &c__2);
    i__1 = (*n << 1) - 1;
    i__2 = (*n << 1) - 1;
    NUMlapack_dlascl ("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2, &iinfo);
    /* Compute the q's and e's. */
    i__1 = (*n << 1) - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
        /* Computing 2nd power */
        d__1 = work[i__];
        work[i__] = d__1 * d__1;
        /* L30: */
    }
    work[*n * 2] = 0.;
    NUMlapack_dlasq2 (n, &work[1], info);
    if (*info == 0) {
        i__1 = *n;
        for (i__ = 1; i__ <= i__1; ++i__) {
            d__[i__] = sqrt (work[i__]);
            /* L40: */
        }
        NUMlapack_dlascl ("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &iinfo);
    }
    return 0;
} /* NUMlapack_dlasq1 */
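/* Editor's illustration (not part of the original LAPACK translation): a sketch
   of NUMlapack_dlasq1 computing the singular values of a small bidiagonal
   matrix; the diagonal and superdiagonal are hypothetical test data. */
#if 0
static void example_dlasq1 (void) {
    double d[3] = { 1.0, 2.0, 3.0 };   /* diagonal */
    double e[2] = { 0.5, 0.5 };        /* superdiagonal */
    double work[12];                   /* workspace of size 4*n */
    integer n = 3, info;
    NUMlapack_dlasq1 (&n, d, e, work, &info);
    /* On success (info == 0), d holds the singular values in decreasing order. */
}
#endif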
int NUMlapack_dlasq2 (integer *n, double *z__, integer *info) {
    /* System generated locals */
    integer i__1, i__2, i__3;
    double d__1, d__2;
    /* Local variables */
    static integer ieee;
    static integer nbig;
    static double dmin__, emin, emax;
    static integer ndiv, iter;
    static double qmin, temp, qmax, zmax;
    static integer splt;
    static double d__, e;
    static integer k;
    static double s, t;
    static integer nfail;
    static double desig, trace, sigma;
    static integer iinfo, i0, i4, n0;
    static integer pp, iwhila, iwhilb;
    static double oldemn, safmin;
    static double eps, tol;
    static integer ipn4;
    static double tol2;

    /* Parameter adjustments */
    --z__;
    /* Function Body */
    *info = 0;
    eps = NUMblas_dlamch ("Precision");
    safmin = NUMblas_dlamch ("Safe minimum");
    tol = eps * 100.;
    /* Computing 2nd power */
    d__1 = tol;
    tol2 = d__1 * d__1;
    if (*n < 0) {
        *info = -1;
        xerbla_ ("DLASQ2", &c__1);
        return 0;
    } else if (*n == 0) {
        return 0;
    } else if (*n == 1) {
        /* 1-by-1 case. */
        if (z__[1] < 0.) {
            *info = -201;
            xerbla_ ("DLASQ2", &c__2);
        }
        return 0;
    } else if (*n == 2) {
        /* 2-by-2 case. */
        if (z__[2] < 0. || z__[3] < 0.) {
            *info = -2;
            xerbla_ ("DLASQ2", &c__2);
            return 0;
        } else if (z__[3] > z__[1]) {
            d__ = z__[3];
            z__[3] = z__[1];
            z__[1] = d__;
        }
        z__[5] = z__[1] + z__[2] + z__[3];
        if (z__[2] > z__[3] * tol2) {
            t = (z__[1] - z__[3] + z__[2]) * .5;
            s = z__[3] * (z__[2] / t);
            if (s <= t) {
                s = z__[3] * (z__[2] / (t * (sqrt (s / t + 1.) + 1.)));
            } else {
                s = z__[3] * (z__[2] / (t + sqrt (t) * sqrt (t + s)));
            }
            t = z__[1] + (s + z__[2]);
            z__[3] *= z__[1] / t;
            z__[1] = t;
        }
        z__[2] = z__[3];
        z__[6] = z__[2] + z__[1];
        return 0;
    }
    /* Check for negative data and compute sums of q's and e's. */
    z__[*n * 2] = 0.;
    emin = z__[2];
    qmax = 0.;
    zmax = 0.;
    d__ = 0.;
    e = 0.;
    i__1 = (*n - 1) << 1;
    for (k = 1; k <= i__1; k += 2) {
        if (z__[k] < 0.) {
            *info = - (k + 200);
            xerbla_ ("DLASQ2", &c__2);
            return 0;
        } else if (z__[k + 1] < 0.) {
            *info = - (k + 201);
            xerbla_ ("DLASQ2", &c__2);
            return 0;
        }
        d__ += z__[k];
        e += z__[k + 1];
        /* Computing MAX */
        d__1 = qmax, d__2 = z__[k];
        qmax = MAX (d__1, d__2);
        /* Computing MIN */
        d__1 = emin, d__2 = z__[k + 1];
        emin = MIN (d__1, d__2);
        /* Computing MAX */
        d__1 = MAX (qmax, zmax), d__2 = z__[k + 1];
        zmax = MAX (d__1, d__2);
        /* L10: */
    }
    if (z__[(*n << 1) - 1] < 0.) {
        *info = - ((*n << 1) + 199);
        xerbla_ ("DLASQ2", &c__2);
        return 0;
    }
    d__ += z__[(*n << 1) - 1];
    /* Computing MAX */
    d__1 = qmax, d__2 = z__[(*n << 1) - 1];
    qmax = MAX (d__1, d__2);
    zmax = MAX (qmax, zmax);
    /* Check for diagonality. */
    if (e == 0.) {
        i__1 = *n;
        for (k = 2; k <= i__1; ++k) {
            z__[k] = z__[(k << 1) - 1];
            /* L20: */
        }
        NUMlapack_dlasrt ("D", n, &z__[1], &iinfo);
        z__[(*n << 1) - 1] = d__;
        return 0;
    }
    trace = d__ + e;
    /* Check for zero data. */
    if (trace == 0.) {
        z__[(*n << 1) - 1] = 0.;
        return 0;
    }
    /* Check whether the machine is IEEE conformable. */
    ieee = NUMlapack_ilaenv (&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, 6, 1) == 1 &&
        NUMlapack_ilaenv (&c__11, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, 6, 1) == 1;
    /* Rearrange data for locality: Z = (q1, qq1, e1, ee1, q2, qq2, e2, ee2, ...). */
    for (k = *n << 1; k >= 2; k += -2) {
        z__[k * 2] = 0.;
        z__[(k << 1) - 1] = z__[k];
        z__[(k << 1) - 2] = 0.;
        z__[(k << 1) - 3] = z__[k - 1];
        /* L30: */
    }
    i0 = 1;
    n0 = *n;
    /* Reverse the qd-array, if warranted. */
    if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) {
        ipn4 = (i0 + n0) << 2;
        i__1 = (i0 + n0 - 1) << 1;
        for (i4 = i0 << 2; i4 <= i__1; i4 += 4) {
            temp = z__[i4 - 3];
            z__[i4 - 3] = z__[ipn4 - i4 - 3];
            z__[ipn4 - i4 - 3] = temp;
            temp = z__[i4 - 1];
            z__[i4 - 1] = z__[ipn4 - i4 - 5];
            z__[ipn4 - i4 - 5] = temp;
            /* L40: */
        }
    }
    /* Initial split checking via dqd and Li's test. */
    pp = 0;
    for (k = 1; k <= 2; ++k) {
        d__ = z__[(n0 << 2) + pp - 3];
        i__1 = (i0 << 2) + pp;
        for (i4 = ((n0 - 1) << 2) + pp; i4 >= i__1; i4 += -4) {
            if (z__[i4 - 1] <= tol2 * d__) {
                z__[i4 - 1] = 0.;
                d__ = z__[i4 - 3];
            } else {
                d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
            }
            /* L50: */
        }
        /* dqd maps Z to ZZ plus Li's test. */
        emin = z__[(i0 << 2) + pp + 1];
        d__ = z__[(i0 << 2) + pp - 3];
        i__1 = ((n0 - 1) << 2) + pp;
        for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) {
            z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1];
            if (z__[i4 - 1] <= tol2 * d__) {
                z__[i4 - 1] = 0.;
                z__[i4 - (pp << 1) - 2] = d__;
                z__[i4 - (pp << 1)] = 0.;
                d__ = z__[i4 + 1];
            } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] &&
                safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) {
                temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2];
                z__[i4 - (pp << 1)] = z__[i4 - 1] * temp;
                d__ *= temp;
            } else {
                z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - (pp << 1) - 2]);
                d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]);
            }
            /* Computing MIN */
            d__1 = emin, d__2 = z__[i4 - (pp << 1)];
            emin = MIN (d__1, d__2);
            /* L60: */
        }
        z__[(n0 << 2) - pp - 2] = d__;
        /* Now find qmax. */
        qmax = z__[(i0 << 2) - pp - 2];
        i__1 = (n0 << 2) - pp - 2;
        for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) {
            /* Computing MAX */
            d__1 = qmax, d__2 = z__[i4];
            qmax = MAX (d__1, d__2);
            /* L70: */
        }
        /* Prepare for the next iteration on K. */
        pp = 1 - pp;
        /* L80: */
    }
    iter = 2;
    nfail = 0;
    ndiv = (n0 - i0) << 1;
    i__1 = *n + 1;
    for (iwhila = 1; iwhila <= i__1; ++iwhila) {
        if (n0 < 1) {
            goto L150;
        }
        /* While the array is unfinished: E(N0) holds the value of SIGMA when
           the submatrix in I0:N0 splits from the rest of the array, but is negated. */
        desig = 0.;
        if (n0 == *n) {
            sigma = 0.;
        } else {
            sigma = -z__[(n0 << 2) - 1];
        }
        if (sigma < 0.) {
            *info = 1;
            return 0;
        }
        /* Find last unreduced submatrix's top index I0, find QMAX and EMIN.
           Find Gershgorin-type bound if Q's much greater than E's. */
        emax = 0.;
        if (n0 > i0) {
            emin = (d__1 = z__[(n0 << 2) - 5], fabs (d__1));
        } else {
            emin = 0.;
        }
        qmin = z__[(n0 << 2) - 3];
        qmax = qmin;
        for (i4 = n0 << 2; i4 >= 8; i4 += -4) {
            if (z__[i4 - 5] <= 0.) {
                goto L100;
            }
            if (qmin >= emax * 4.) {
                /* Computing MIN */
                d__1 = qmin, d__2 = z__[i4 - 3];
                qmin = MIN (d__1, d__2);
                /* Computing MAX */
                d__1 = emax, d__2 = z__[i4 - 5];
                emax = MAX (d__1, d__2);
            }
            /* Computing MAX */
            d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5];
            qmax = MAX (d__1, d__2);
            /* Computing MIN */
            d__1 = emin, d__2 = z__[i4 - 5];
            emin = MIN (d__1, d__2);
            /* L90: */
        }
        i4 = 4;
L100:
        i0 = i4 / 4;
        /* Store EMIN for passing to DLASQ3. */
        z__[(n0 << 2) - 1] = emin;
        /* Put -(initial shift) into DMIN.
           Computing MAX */
        d__1 = 0., d__2 = qmin - sqrt (qmin) * 2. * sqrt (emax);
        dmin__ = -MAX (d__1, d__2);
        /* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */
        pp = 0;
        nbig = (n0 - i0 + 1) * 30;
        i__2 = nbig;
        for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
            if (i0 > n0) {
                goto L130;
            }
            /* While submatrix unfinished take a good dqds step. */
            NUMlapack_dlasq3 (&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &nfail, &iter, &ndiv,
                &ieee);
            pp = 1 - pp;
            /* When EMIN is very small check for splits. */
            if (pp == 0 && n0 - i0 >= 3) {
                if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 * sigma) {
                    splt = i0 - 1;
                    qmax = z__[(i0 << 2) - 3];
                    emin = z__[(i0 << 2) - 1];
                    oldemn = z__[i0 * 4];
                    i__3 = (n0 - 3) << 2;
                    for (i4 = i0 << 2; i4 <= i__3; i4 += 4) {
                        if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= tol2 * sigma) {
                            z__[i4 - 1] = -sigma;
                            splt = i4 / 4;
                            qmax = 0.;
                            emin = z__[i4 + 3];
                            oldemn = z__[i4 + 4];
                        } else {
                            /* Computing MAX */
                            d__1 = qmax, d__2 = z__[i4 + 1];
                            qmax = MAX (d__1, d__2);
                            /* Computing MIN */
                            d__1 = emin, d__2 = z__[i4 - 1];
                            emin = MIN (d__1, d__2);
                            /* Computing MIN */
                            d__1 = oldemn, d__2 = z__[i4];
                            oldemn = MIN (d__1, d__2);
                        }
                        /* L110: */
                    }
                    z__[(n0 << 2) - 1] = emin;
                    z__[n0 * 4] = oldemn;
                    i0 = splt + 1;
                }
            }
            /* L120: */
        }
        *info = 2;
        return 0;
        /* end IWHILB */
L130:
        /* L140: */
        ;
    }
    *info = 3;
    return 0;
    /* end IWHILA */
L150:
    /* Move q's to the front. */
    i__1 = *n;
    for (k = 2; k <= i__1; ++k) {
        z__[k] = z__[(k << 2) - 3];
        /* L160: */
    }
    /* Sort and compute sum of eigenvalues. */
    NUMlapack_dlasrt ("D", n, &z__[1], &iinfo);
    e = 0.;
    for (k = *n; k >= 1; --k) {
        e += z__[k];
        /* L170: */
    }
    /* Store trace, sum(eigenvalues) and information on performance. */
    z__[(*n << 1) + 1] = trace;
    z__[(*n << 1) + 2] = e;
    z__[(*n << 1) + 3] = (double) iter;
    /* Computing 2nd power */
    i__1 = *n;
    z__[(*n << 1) + 4] = (double) ndiv / (double) (i__1 * i__1);
    z__[(*n << 1) + 5] = nfail * 100. / (double) iter;
    return 0;
} /* NUMlapack_dlasq2 */
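/* Editor's illustration (not part of the original LAPACK translation): the Z
   array consumed by NUMlapack_dlasq2 interleaves the squared diagonal (q's)
   and squared off-diagonal (e's) of the bidiagonal matrix, exactly as
   NUMlapack_dlasq1 prepares it above. A hypothetical packing helper: */
#if 0
static void example_dlasq2_packing (const double *d, const double *e, integer n, double *z) {
    integer k;
    /* z needs at least 4*n elements; dlasq2 uses the tail as workspace. */
    for (k = 0; k < n; k ++) {
        z[2 * k] = d[k] * d[k];                          /* q(k+1) = d(k+1)^2 */
        z[2 * k + 1] = k < n - 1 ? e[k] * e[k] : 0.0;    /* e(k+1)^2, last one zero */
    }
}
#endif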
int NUMlapack_dlasq3 (integer *i0, integer *n0, double *z__, integer *pp, double *dmin__, double *sigma,
    double *desig, double *qmax, integer *nfail, integer *iter, integer *ndiv, integer *ieee) {
    /* Initialized data */
    static integer ttype = 0;
    static double dmin1 = 0.;
    static double dmin2 = 0.;
    static double dn = 0.;
    static double dn1 = 0.;
    static double dn2 = 0.;
    static double tau = 0.;
    /* System generated locals */
    integer i__1;
    double d__1, d__2;
    /* Local variables */
    static double temp, s, t;
    static integer j4;
    static integer nn;
    static double safmin, eps, tol;
    static integer n0in, ipn4;
    static double tol2;

    --z__;
    /* Function Body */
    n0in = *n0;
    eps = NUMblas_dlamch ("Precision");
    safmin = NUMblas_dlamch ("Safe minimum");
    tol = eps * 100.;
    /* Computing 2nd power */
    d__1 = tol;
    tol2 = d__1 * d__1;
    /* Check for deflation. */
L10:
    if (*n0 < *i0) {
        return 0;
    }
    if (*n0 == *i0) {
        goto L20;
    }
    nn = (*n0 << 2) + *pp;
    if (*n0 == *i0 + 1) {
        goto L40;
    }
    /* Check whether E(N0-1) is negligible, 1 eigenvalue. */
    if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) - 4] > tol2 * z__[nn - 7]) {
        goto L30;
    }
L20:
    z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma;
    -- (*n0);
    goto L10;
    /* Check whether E(N0-2) is negligible, 2 eigenvalues. */
L30:
    if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[nn - 11]) {
        goto L50;
    }
L40:
    if (z__[nn - 3] > z__[nn - 7]) {
        s = z__[nn - 3];
        z__[nn - 3] = z__[nn - 7];
        z__[nn - 7] = s;
    }
    if (z__[nn - 5] > z__[nn - 3] * tol2) {
        t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5;
        s = z__[nn - 3] * (z__[nn - 5] / t);
        if (s <= t) {
            s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt (s / t + 1.) + 1.)));
        } else {
            s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt (t) * sqrt (t + s)));
        }
        t = z__[nn - 7] + (s + z__[nn - 5]);
        z__[nn - 3] *= z__[nn - 7] / t;
        z__[nn - 7] = t;
    }
    z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma;
    z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma;
    *n0 += -2;
    goto L10;
L50:
    /* Reverse the qd-array, if warranted. */
    if (*dmin__ <= 0. || *n0 < n0in) {
        if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) {
            ipn4 = (*i0 + *n0) << 2;
            i__1 = (*i0 + *n0 - 1) << 1;
            for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
                temp = z__[j4 - 3];
                z__[j4 - 3] = z__[ipn4 - j4 - 3];
                z__[ipn4 - j4 - 3] = temp;
                temp = z__[j4 - 2];
                z__[j4 - 2] = z__[ipn4 - j4 - 2];
                z__[ipn4 - j4 - 2] = temp;
                temp = z__[j4 - 1];
                z__[j4 - 1] = z__[ipn4 - j4 - 5];
                z__[ipn4 - j4 - 5] = temp;
                temp = z__[j4];
                z__[j4] = z__[ipn4 - j4 - 4];
                z__[ipn4 - j4 - 4] = temp;
                /* L60: */
            }
            if (*n0 - *i0 <= 4) {
                z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1];
                z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp];
            }
            /* Computing MIN */
            d__1 = dmin2, d__2 = z__[(*n0 << 2) + *pp - 1];
            dmin2 = MIN (d__1, d__2);
            /* Computing MIN */
            d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1], d__1 =
                MIN (d__1, d__2), d__2 = z__[(*i0 << 2) + *pp + 3];
            z__[(*n0 << 2) + *pp - 1] = MIN (d__1, d__2);
            /* Computing MIN */
            d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 = MIN (d__1, d__2), d__2 =
                z__[(*i0 << 2) - *pp + 4];
            z__[(*n0 << 2) - *pp] = MIN (d__1, d__2);
            /* Computing MAX */
            d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = MAX (d__1, d__2), d__2 =
                z__[(*i0 << 2) + *pp + 1];
            *qmax = MAX (d__1, d__2);
            *dmin__ = 0.;
        }
    }
    /* L70:
       Computing MIN */
    d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*n0 << 2) + *pp - 9], d__1 = MIN (d__1, d__2), d__2 =
        dmin2 + z__[(*n0 << 2) - *pp];
    if (*dmin__ < 0. || safmin * *qmax < MIN (d__1, d__2)) {
        /* Choose a shift. */
        NUMlapack_dlasq4 (i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2, &tau, &ttype);
        /* Call dqds until DMIN > 0. */
L80:
        NUMlapack_dlasq5 (i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2, ieee);
        *ndiv += *n0 - *i0 + 2;
        ++ (*iter);
        /* Check status. */
        if (*dmin__ >= 0. && dmin1 > 0.) {
            /* Success. */
            goto L100;
        } else if (*dmin__ < 0. && dmin1 > 0. && z__[((*n0 - 1) << 2) - *pp] < tol * (*sigma + dn1) &&
            fabs (dn) < tol * *sigma) {
            /* Convergence hidden by negative DN. */
            z__[((*n0 - 1) << 2) - *pp + 2] = 0.;
            *dmin__ = 0.;
            goto L100;
        } else if (*dmin__ < 0.) {
            /* TAU too big. Select new TAU and try again. */
            ++ (*nfail);
            if (ttype < -22) {
                /* Failed twice. Play it safe. */
                tau = 0.;
            } else if (dmin1 > 0.) {
                /* Late failure. Gives excellent shift. */
                tau = (tau + *dmin__) * (1. - eps * 2.);
                ttype += -11;
            } else {
                /* Early failure. Divide by 4. */
                tau *= .25;
                ttype += -12;
            }
            goto L80;
        } else if (*dmin__ != *dmin__) {
            /* NaN. */
            tau = 0.;
            goto L80;
        } else {
            /* Possible underflow. Play it safe. */
            goto L90;
        }
    }
    /* Risk of underflow. */
L90:
    NUMlapack_dlasq6 (i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2);
    *ndiv += *n0 - *i0 + 2;
    ++ (*iter);
    tau = 0.;
L100:
    /* Update SIGMA += TAU, with DESIG carrying the rounding error (compensated summation). */
    if (tau < *sigma) {
        *desig += tau;
        t = *sigma + *desig;
        *desig -= t - *sigma;
    } else {
        t = *sigma + tau;
        *desig = *sigma - (t - tau) + *desig;
    }
    *sigma = t;
    return 0;
} /* NUMlapack_dlasq3 */
int NUMlapack_dlasq4 (integer *i0, integer *n0, double *z__, integer *pp, integer *n0in, double *dmin__,
    double *dmin1, double *dmin2, double *dn, double *dn1, double *dn2, double *tau, integer *ttype) {
    /* Initialized data */
    static double g = 0.;
    /* System generated locals */
    integer i__1;
    double d__1, d__2;
    /* Local variables */
    static double s, a2, b1, b2;
    static integer i4, nn, np;
    static double gam, gap1, gap2;

    /* Parameter adjustments */
    --z__;
    /* Function Body.
       A negative DMIN forces the shift to take that absolute value.
       TTYPE records the type of shift. */
    if (*dmin__ <= 0.) {
        *tau = - (*dmin__);
        *ttype = -1;
        return 0;
    }
    nn = (*n0 << 2) + *pp;
    if (*n0in == *n0) {
        /* No eigenvalues deflated. */
        if (*dmin__ == *dn || *dmin__ == *dn1) {
            b1 = sqrt (z__[nn - 3]) * sqrt (z__[nn - 5]);
            b2 = sqrt (z__[nn - 7]) * sqrt (z__[nn - 9]);
            a2 = z__[nn - 7] + z__[nn - 5];
            /* Cases 2 and 3. */
            if (*dmin__ == *dn && *dmin1 == *dn1) {
                gap2 = *dmin2 - a2 - *dmin2 * .25;
                if (gap2 > 0. && gap2 > b2) {
                    gap1 = a2 - *dn - b2 / gap2 * b2;
                } else {
                    gap1 = a2 - *dn - (b1 + b2);
                }
                if (gap1 > 0. && gap1 > b1) {
                    /* Computing MAX */
                    d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5;
                    s = MAX (d__1, d__2);
                    *ttype = -2;
                } else {
                    s = 0.;
                    if (*dn > b1) {
                        s = *dn - b1;
                    }
                    if (a2 > b1 + b2) {
                        /* Computing MIN */
                        d__1 = s, d__2 = a2 - (b1 + b2);
                        s = MIN (d__1, d__2);
                    }
                    /* Computing MAX */
                    d__1 = s, d__2 = *dmin__ * .333;
                    s = MAX (d__1, d__2);
                    *ttype = -3;
                }
            } else {
                /* Case 4. */
                *ttype = -4;
                s = *dmin__ * .25;
                if (*dmin__ == *dn) {
                    gam = *dn;
                    a2 = 0.;
                    if (z__[nn - 5] > z__[nn - 7]) {
                        return 0;
                    }
                    b2 = z__[nn - 5] / z__[nn - 7];
                    np = nn - 9;
                } else {
                    np = nn - (*pp << 1);
                    b2 = z__[np - 2];
                    gam = *dn1;
                    if (z__[np - 4] > z__[np - 2]) {
                        return 0;
                    }
                    a2 = z__[np - 4] / z__[np - 2];
                    if (z__[nn - 9] > z__[nn - 11]) {
                        return 0;
                    }
                    b2 = z__[nn - 9] / z__[nn - 11];
                    np = nn - 13;
                }
                /* Approximate contribution to norm squared from I < NN-1. */
                a2 += b2;
                i__1 = (*i0 << 2) - 1 + *pp;
                for (i4 = np; i4 >= i__1; i4 += -4) {
                    if (b2 == 0.) {
                        goto L20;
                    }
                    b1 = b2;
                    if (z__[i4] > z__[i4 - 2]) {
                        return 0;
                    }
                    b2 *= z__[i4] / z__[i4 - 2];
                    a2 += b2;
                    if (MAX (b2, b1) * 100. < a2 || .563 < a2) {
                        goto L20;
                    }
                    /* L10: */
                }
L20:
                a2 *= 1.05;
                /* Rayleigh quotient residual bound. */
                if (a2 < .563) {
                    s = gam * (1. - sqrt (a2)) / (a2 + 1.);
                }
            }
        } else if (*dmin__ == *dn2) {
            /* Case 5. */
            *ttype = -5;
            s = *dmin__ * .25;
            /* Compute contribution to norm squared from I > NN-2. */
            np = nn - (*pp << 1);
            b1 = z__[np - 2];
            b2 = z__[np - 6];
            gam = *dn2;
            if (z__[np - 8] > b2 || z__[np - 4] > b1) {
                return 0;
            }
            a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.);
            /* Approximate contribution to norm squared from I < NN-2. */
            if (*n0 - *i0 > 2) {
                b2 = z__[nn - 13] / z__[nn - 15];
                a2 += b2;
                i__1 = (*i0 << 2) - 1 + *pp;
                for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
                    if (b2 == 0.) {
                        goto L40;
                    }
                    b1 = b2;
                    if (z__[i4] > z__[i4 - 2]) {
                        return 0;
                    }
                    b2 *= z__[i4] / z__[i4 - 2];
                    a2 += b2;
                    if (MAX (b2, b1) * 100. < a2 || .563 < a2) {
                        goto L40;
                    }
                    /* L30: */
                }
L40:
                a2 *= 1.05;
            }
            if (a2 < .563) {
                s = gam * (1. - sqrt (a2)) / (a2 + 1.);
            }
        } else {
            /* Case 6, no information to guide us. */
            if (*ttype == -6) {
                g += (1. - g) * .333;
            } else if (*ttype == -18) {
                g = .083250000000000005;
            } else {
                g = .25;
            }
            s = g * *dmin__;
            *ttype = -6;
        }
    } else if (*n0in == *n0 + 1) {
        /* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
        if (*dmin1 == *dn1 && *dmin2 == *dn2) {
            /* Cases 7 and 8. */
            *ttype = -7;
            s = *dmin1 * .333;
            if (z__[nn - 5] > z__[nn - 7]) {
                return 0;
            }
            b1 = z__[nn - 5] / z__[nn - 7];
            b2 = b1;
            if (b2 == 0.) {
                goto L60;
            }
            i__1 = (*i0 << 2) - 1 + *pp;
            for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
                a2 = b1;
                if (z__[i4] > z__[i4 - 2]) {
                    return 0;
                }
                b1 *= z__[i4] / z__[i4 - 2];
                b2 += b1;
                if (MAX (b1, a2) * 100. < b2) {
                    goto L60;
                }
                /* L50: */
            }
L60:
            b2 = sqrt (b2 * 1.05);
            /* Computing 2nd power */
            d__1 = b2;
            a2 = *dmin1 / (d__1 * d__1 + 1.);
            gap2 = *dmin2 * .5 - a2;
            if (gap2 > 0. && gap2 > b2 * a2) {
                /* Computing MAX */
                d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
                s = MAX (d__1, d__2);
            } else {
                /* Computing MAX */
                d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
                s = MAX (d__1, d__2);
                *ttype = -8;
            }
        } else {
            /* Case 9. */
            s = *dmin1 * .25;
            if (*dmin1 == *dn1) {
                s = *dmin1 * .5;
            }
            *ttype = -9;
        }
    } else if (*n0in == *n0 + 2) {
        /* Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN.
           Cases 10 and 11. */
        if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) {
            *ttype = -10;
            s = *dmin2 * .333;
            if (z__[nn - 5] > z__[nn - 7]) {
                return 0;
            }
            b1 = z__[nn - 5] / z__[nn - 7];
            b2 = b1;
            if (b2 == 0.) {
                goto L80;
            }
            i__1 = (*i0 << 2) - 1 + *pp;
            for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) {
                if (z__[i4] > z__[i4 - 2]) {
                    return 0;
                }
                b1 *= z__[i4] / z__[i4 - 2];
                b2 += b1;
                if (b1 * 100. < b2) {
                    goto L80;
                }
                /* L70: */
            }
L80:
            b2 = sqrt (b2 * 1.05);
            /* Computing 2nd power */
            d__1 = b2;
            a2 = *dmin2 / (d__1 * d__1 + 1.);
            gap2 = z__[nn - 7] + z__[nn - 9] - sqrt (z__[nn - 11]) * sqrt (z__[nn - 9]) - a2;
            if (gap2 > 0. && gap2 > b2 * a2) {
                /* Computing MAX */
                d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
                s = MAX (d__1, d__2);
            } else {
                /* Computing MAX */
                d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
                s = MAX (d__1, d__2);
            }
        } else {
            s = *dmin2 * .25;
            *ttype = -11;
        }
    } else if (*n0in > *n0 + 2) {
        /* Case 12, more than two eigenvalues deflated. No information. */
        s = 0.;
        *ttype = -12;
    }
    *tau = s;
    return 0;
} /* NUMlapack_dlasq4 */
int NUMlapack_dlasq5 (integer *i0, integer *n0, double *z__, integer *pp, double *tau, double *dmin__,
    double *dmin1, double *dmin2, double *dn, double *dnm1, double *dnm2, integer *ieee) {
    /* System generated locals */
    integer i__1;
    double d__1, d__2;
    /* Local variables */
    static double emin, temp, d__;
    static integer j4, j4p2;

    --z__;
    /* Function Body */
    if (*n0 - *i0 - 1 <= 0) {
        return 0;
    }
    j4 = (*i0 << 2) + *pp - 3;
    emin = z__[j4 + 4];
    d__ = z__[j4] - *tau;
    *dmin__ = d__;
    *dmin1 = -z__[j4];
    if (*ieee) {
        /* Code for IEEE arithmetic. */
        if (*pp == 0) {
            i__1 = (*n0 - 3) << 2;
            for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
                z__[j4 - 2] = d__ + z__[j4 - 1];
                temp = z__[j4 + 1] / z__[j4 - 2];
                d__ = d__ * temp - *tau;
                *dmin__ = MIN (*dmin__, d__);
                z__[j4] = z__[j4 - 1] * temp;
                /* Computing MIN */
                d__1 = z__[j4];
                emin = MIN (d__1, emin);
                /* L10: */
            }
        } else {
            i__1 = (*n0 - 3) << 2;
            for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
                z__[j4 - 3] = d__ + z__[j4];
                temp = z__[j4 + 2] / z__[j4 - 3];
                d__ = d__ * temp - *tau;
                *dmin__ = MIN (*dmin__, d__);
                z__[j4 - 1] = z__[j4] * temp;
                /* Computing MIN */
                d__1 = z__[j4 - 1];
                emin = MIN (d__1, emin);
                /* L20: */
            }
        }
        /* Unroll last two steps. */
        *dnm2 = d__;
        *dmin2 = *dmin__;
        j4 = ((*n0 - 2) << 2) - *pp;
        j4p2 = j4 + (*pp << 1) - 1;
        z__[j4 - 2] = *dnm2 + z__[j4p2];
        z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
        *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
        *dmin__ = MIN (*dmin__, *dnm1);
        *dmin1 = *dmin__;
        j4 += 4;
        j4p2 = j4 + (*pp << 1) - 1;
        z__[j4 - 2] = *dnm1 + z__[j4p2];
        z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
        *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
        *dmin__ = MIN (*dmin__, *dn);
    } else {
        /* Code for non-IEEE arithmetic. */
        if (*pp == 0) {
            i__1 = (*n0 - 3) << 2;
            for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
                z__[j4 - 2] = d__ + z__[j4 - 1];
                if (d__ < 0.) {
                    return 0;
                } else {
                    z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
                    d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
                }
                *dmin__ = MIN (*dmin__, d__);
                /* Computing MIN */
                d__1 = emin, d__2 = z__[j4];
                emin = MIN (d__1, d__2);
                /* L30: */
            }
        } else {
            i__1 = (*n0 - 3) << 2;
            for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
                z__[j4 - 3] = d__ + z__[j4];
                if (d__ < 0.) {
                    return 0;
                } else {
                    z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
                    d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
                }
                *dmin__ = MIN (*dmin__, d__);
                /* Computing MIN */
                d__1 = emin, d__2 = z__[j4 - 1];
                emin = MIN (d__1, d__2);
                /* L40: */
            }
        }
        /* Unroll last two steps. */
        *dnm2 = d__;
        *dmin2 = *dmin__;
        j4 = ((*n0 - 2) << 2) - *pp;
        j4p2 = j4 + (*pp << 1) - 1;
        z__[j4 - 2] = *dnm2 + z__[j4p2];
        if (*dnm2 < 0.) {
            return 0;
        } else {
            z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
            *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
        }
        *dmin__ = MIN (*dmin__, *dnm1);
        *dmin1 = *dmin__;
        j4 += 4;
        j4p2 = j4 + (*pp << 1) - 1;
        z__[j4 - 2] = *dnm1 + z__[j4p2];
        if (*dnm1 < 0.) {
            return 0;
        } else {
            z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
            *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
        }
        *dmin__ = MIN (*dmin__, *dn);
    }
    z__[j4 + 2] = *dn;
    z__[(*n0 << 2) - *pp] = emin;
    return 0;
} /* NUMlapack_dlasq5 */
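/* Editor's note (not part of the original LAPACK translation): in the dqds
   kernels NUMlapack_dlasq5 and NUMlapack_dlasq6, PP selects the "ping"
   (PP = 0) or "pong" (PP = 1) half of the interleaved Z array; each sweep
   reads one half and writes the other, following the layout set up in
   NUMlapack_dlasq2 above. */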
int NUMlapack_dlasq6 (integer *i0, integer *n0, double *z__, integer *pp, double *dmin__, double *dmin1,
    double *dmin2, double *dn, double *dnm1, double *dnm2) {
    /* System generated locals */
    integer i__1;
    double d__1, d__2;
    /* Local variables */
    static double emin, temp, d__;
    static integer j4;
    static double safmin;
    static integer j4p2;

    /* Parameter adjustments */
    --z__;
    /* Function Body */
    if (*n0 - *i0 - 1 <= 0) {
        return 0;
    }
    safmin = NUMblas_dlamch ("Safe minimum");
    j4 = (*i0 << 2) + *pp - 3;
    emin = z__[j4 + 4];
    d__ = z__[j4];
    *dmin__ = d__;
    if (*pp == 0) {
        i__1 = (*n0 - 3) << 2;
        for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
            z__[j4 - 2] = d__ + z__[j4 - 1];
            if (z__[j4 - 2] == 0.) {
                z__[j4] = 0.;
                d__ = z__[j4 + 1];
                *dmin__ = d__;
                emin = 0.;
            } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4 + 1]) {
                temp = z__[j4 + 1] / z__[j4 - 2];
                z__[j4] = z__[j4 - 1] * temp;
                d__ *= temp;
            } else {
                z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
                d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
            }
            *dmin__ = MIN (*dmin__, d__);
            /* Computing MIN */
            d__1 = emin, d__2 = z__[j4];
            emin = MIN (d__1, d__2);
            /* L10: */
        }
    } else {
        i__1 = (*n0 - 3) << 2;
        for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) {
            z__[j4 - 3] = d__ + z__[j4];
            if (z__[j4 - 3] == 0.) {
                z__[j4 - 1] = 0.;
                d__ = z__[j4 + 2];
                *dmin__ = d__;
                emin = 0.;
            } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 - 3] < z__[j4 + 2]) {
                temp = z__[j4 + 2] / z__[j4 - 3];
                z__[j4 - 1] = z__[j4] * temp;
                d__ *= temp;
            } else {
                z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
                d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
            }
            *dmin__ = MIN (*dmin__, d__);
            /* Computing MIN */
            d__1 = emin, d__2 = z__[j4 - 1];
            emin = MIN (d__1, d__2);
            /* L20: */
        }
    }
    /* Unroll last two steps. */
    *dnm2 = d__;
    *dmin2 = *dmin__;
    j4 = ((*n0 - 2) << 2) - *pp;
    j4p2 = j4 + (*pp << 1) - 1;
    z__[j4 - 2] = *dnm2 + z__[j4p2];
    if (z__[j4 - 2] == 0.) {
        z__[j4] = 0.;
        *dnm1 = z__[j4p2 + 2];
        *dmin__ = *dnm1;
        emin = 0.;
    } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) {
        temp = z__[j4p2 + 2] / z__[j4 - 2];
        z__[j4] = z__[j4p2] * temp;
        *dnm1 = *dnm2 * temp;
    } else {
        z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
        *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
    }
    *dmin__ = MIN (*dmin__, *dnm1);
    *dmin1 = *dmin__;
    j4 += 4;
    j4p2 = j4 + (*pp << 1) - 1;
    z__[j4 - 2] = *dnm1 + z__[j4p2];
    if (z__[j4 - 2] == 0.) {
        z__[j4] = 0.;
        *dn = z__[j4p2 + 2];
        *dmin__ = *dn;
        emin = 0.;
    } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) {
        temp = z__[j4p2 + 2] / z__[j4 - 2];
        z__[j4] = z__[j4p2] * temp;
        *dn = *dnm1 * temp;
    } else {
        z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
        *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
    }
    *dmin__ = MIN (*dmin__, *dn);
    z__[j4 + 2] = *dn;
    z__[(*n0 << 2) - *pp] = emin;
    return 0;
} /* NUMlapack_dlasq6 */
int NUMlapack_dlasr (const char *side, const char *pivot, const char *direct, integer *m, integer *n,
    double *c__, double *s, double *a, integer *lda) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    /* Local variables */
    static integer info;
    static double temp;
    static integer i__, j;
    static double ctemp, stemp;

    --c__;
    --s;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    /* Function Body */
    info = 0;
    if (! (lsame_ (side, "L") || lsame_ (side, "R"))) {
        info = 1;
    } else if (! (lsame_ (pivot, "V") || lsame_ (pivot, "T") || lsame_ (pivot, "B"))) {
        info = 2;
    } else if (! (lsame_ (direct, "F") || lsame_ (direct, "B"))) {
        info = 3;
    } else if (*m < 0) {
        info = 4;
    } else if (*n < 0) {
        info = 5;
    } else if (*lda < MAX (1, *m)) {
        info = 9;
    }
    if (info != 0) {
        xerbla_ ("DLASR ", &info);
        return 0;
    }
    /* Quick return if possible */
    if (*m == 0 || *n == 0) {
        return 0;
    }
    if (lsame_ (side, "L")) {
        /* Form P * A */
        if (lsame_ (pivot, "V")) {
            if (lsame_ (direct, "F")) {
                i__1 = *m - 1;
                for (j = 1; j <= i__1; ++j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *n;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (j + 1, i__);
                            a_ref (j + 1, i__) = ctemp * temp - stemp * a_ref (j, i__);
                            a_ref (j, i__) = stemp * temp + ctemp * a_ref (j, i__);
                            /* L10: */
                        }
                    }
                    /* L20: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *m - 1; j >= 1; --j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *n;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (j + 1, i__);
                            a_ref (j + 1, i__) = ctemp * temp - stemp * a_ref (j, i__);
                            a_ref (j, i__) = stemp * temp + ctemp * a_ref (j, i__);
                            /* L30: */
                        }
                    }
                    /* L40: */
                }
            }
        } else if (lsame_ (pivot, "T")) {
            if (lsame_ (direct, "F")) {
                i__1 = *m;
                for (j = 2; j <= i__1; ++j) {
                    ctemp = c__[j - 1];
                    stemp = s[j - 1];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *n;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (j, i__);
                            a_ref (j, i__) = ctemp * temp - stemp * a_ref (1, i__);
                            a_ref (1, i__) = stemp * temp + ctemp * a_ref (1, i__);
                            /* L50: */
                        }
                    }
                    /* L60: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *m; j >= 2; --j) {
                    ctemp = c__[j - 1];
                    stemp = s[j - 1];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *n;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (j, i__);
                            a_ref (j, i__) = ctemp * temp - stemp * a_ref (1, i__);
                            a_ref (1, i__) = stemp * temp + ctemp * a_ref (1, i__);
                            /* L70: */
                        }
                    }
                    /* L80: */
                }
            }
        } else if (lsame_ (pivot, "B")) {
            if (lsame_ (direct, "F")) {
                i__1 = *m - 1;
                for (j = 1; j <= i__1; ++j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *n;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (j, i__);
                            a_ref (j, i__) = stemp * a_ref (*m, i__) + ctemp * temp;
                            a_ref (*m, i__) = ctemp * a_ref (*m, i__) - stemp * temp;
                            /* L90: */
                        }
                    }
                    /* L100: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *m - 1; j >= 1; --j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *n;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (j, i__);
                            a_ref (j, i__) = stemp * a_ref (*m, i__) + ctemp * temp;
                            a_ref (*m, i__) = ctemp * a_ref (*m, i__) - stemp * temp;
                            /* L110: */
                        }
                    }
                    /* L120: */
                }
            }
        }
    } else if (lsame_ (side, "R")) {
        /* Form A * P' */
        if (lsame_ (pivot, "V")) {
            if (lsame_ (direct, "F")) {
                i__1 = *n - 1;
                for (j = 1; j <= i__1; ++j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *m;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (i__, j + 1);
                            a_ref (i__, j + 1) = ctemp * temp - stemp * a_ref (i__, j);
                            a_ref (i__, j) = stemp * temp + ctemp * a_ref (i__, j);
                            /* L130: */
                        }
                    }
                    /* L140: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *n - 1; j >= 1; --j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *m;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (i__, j + 1);
                            a_ref (i__, j + 1) = ctemp * temp - stemp * a_ref (i__, j);
                            a_ref (i__, j) = stemp * temp + ctemp * a_ref (i__, j);
                            /* L150: */
                        }
                    }
                    /* L160: */
                }
            }
        } else if (lsame_ (pivot, "T")) {
            if (lsame_ (direct, "F")) {
                i__1 = *n;
                for (j = 2; j <= i__1; ++j) {
                    ctemp = c__[j - 1];
                    stemp = s[j - 1];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *m;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (i__, j);
                            a_ref (i__, j) = ctemp * temp - stemp * a_ref (i__, 1);
                            a_ref (i__, 1) = stemp * temp + ctemp * a_ref (i__, 1);
                            /* L170: */
                        }
                    }
                    /* L180: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *n; j >= 2; --j) {
                    ctemp = c__[j - 1];
                    stemp = s[j - 1];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *m;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (i__, j);
                            a_ref (i__, j) = ctemp * temp - stemp * a_ref (i__, 1);
                            a_ref (i__, 1) = stemp * temp + ctemp * a_ref (i__, 1);
                            /* L190: */
                        }
                    }
                    /* L200: */
                }
            }
        } else if (lsame_ (pivot, "B")) {
            if (lsame_ (direct, "F")) {
                i__1 = *n - 1;
                for (j = 1; j <= i__1; ++j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__2 = *m;
                        for (i__ = 1; i__ <= i__2; ++i__) {
                            temp = a_ref (i__, j);
                            a_ref (i__, j) = stemp * a_ref (i__, *n) + ctemp * temp;
                            a_ref (i__, *n) = ctemp * a_ref (i__, *n) - stemp * temp;
                            /* L210: */
                        }
                    }
                    /* L220: */
                }
            } else if (lsame_ (direct, "B")) {
                for (j = *n - 1; j >= 1; --j) {
                    ctemp = c__[j];
                    stemp = s[j];
                    if (ctemp != 1. || stemp != 0.) {
                        i__1 = *m;
                        for (i__ = 1; i__ <= i__1; ++i__) {
                            temp = a_ref (i__, j);
                            a_ref (i__, j) = stemp * a_ref (i__, *n) + ctemp * temp;
                            a_ref (i__, *n) = ctemp * a_ref (i__, *n) - stemp * temp;
                            /* L230: */
                        }
                    }
                    /* L240: */
                }
            }
        }
    }
    return 0;
} /* NUMlapack_dlasr */
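/* Editor's illustration (not part of the original LAPACK translation): applying
   a single plane rotation from the left to a 2-by-3 matrix with
   NUMlapack_dlasr; the rotation (c, s) = (0.6, 0.8) is a hypothetical example. */
#if 0
static void example_dlasr (void) {
    double a[6] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };   /* 2-by-3, column-major */
    double c = 0.6, s = 0.8;   /* c^2 + s^2 = 1 */
    integer m = 2, n = 3, lda = 2;
    NUMlapack_dlasr ("Left", "Variable", "Forward", &m, &n, &c, &s, a, &lda);
}
#endif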
#define stack_ref(a_1,a_2) stack[(a_2)*2 + a_1 - 3]
int NUMlapack_dlasrt (const char *id, integer *n, double *d__, integer *info) {
    /* System generated locals */
    integer i__1, i__2;
    /* Local variables */
    static integer endd, i__, j;
    static integer stack[64] /* was [2][32] */ ;
    static double dmnmx, d1, d2, d3;
    static integer start;
    static integer stkpnt, dir;
    static double tmp;
    --d__;
    /* Function Body */
    *info = 0;
    dir = -1;
    if (lsame_ (id, "D")) {
        dir = 0;
    } else if (lsame_ (id, "I")) {
        dir = 1;
    }
    if (dir == -1) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DLASRT", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 1) {
        return 0;
    }
    stkpnt = 1;
    stack_ref (1, 1) = 1;
    stack_ref (2, 1) = *n;
L10:
    start = stack_ref (1, stkpnt);
    endd = stack_ref (2, stkpnt);
    --stkpnt;
    if (endd - start <= 20 && endd - start > 0) {
        /* Do Insertion sort on D( START:ENDD ) */
        if (dir == 0) {
            /* Sort into decreasing order */
            i__1 = endd;
            for (i__ = start + 1; i__ <= i__1; ++i__) {
                i__2 = start + 1;
                for (j = i__; j >= i__2; --j) {
                    if (d__[j] > d__[j - 1]) {
                        dmnmx = d__[j];
                        d__[j] = d__[j - 1];
                        d__[j - 1] = dmnmx;
                    } else {
                        goto L30;
                    }
                    /* L20: */
                }
L30:
                ;
            }
        } else {
            /* Sort into increasing order */
            i__1 = endd;
            for (i__ = start + 1; i__ <= i__1; ++i__) {
                i__2 = start + 1;
                for (j = i__; j >= i__2; --j) {
                    if (d__[j] < d__[j - 1]) {
                        dmnmx = d__[j];
                        d__[j] = d__[j - 1];
                        d__[j - 1] = dmnmx;
                    } else {
                        goto L50;
                    }
                    /* L40: */
                }
L50:
                ;
            }
        }
    } else if (endd - start > 20) {
        /* Partition D( START:ENDD ) and stack parts, largest one first
           Choose partition entry as median of 3 */
        d1 = d__[start];
        d2 = d__[endd];
        i__ = (start + endd) / 2;
        d3 = d__[i__];
        if (d1 < d2) {
            if (d3 < d1) {
                dmnmx = d1;
            } else if (d3 < d2) {
                dmnmx = d3;
            } else {
                dmnmx = d2;
            }
        } else {
            if (d3 < d2) {
                dmnmx = d2;
            } else if (d3 < d1) {
                dmnmx = d3;
            } else {
                dmnmx = d1;
            }
        }
        if (dir == 0) {
            /* Sort into decreasing order */
            i__ = start - 1;
            j = endd + 1;
L60:
L70:
            --j;
            if (d__[j] < dmnmx) {
                goto L70;
            }
L80:
            ++i__;
            if (d__[i__] > dmnmx) {
                goto L80;
            }
            if (i__ < j) {
                tmp = d__[i__];
                d__[i__] = d__[j];
                d__[j] = tmp;
                goto L60;
            }
            if (j - start > endd - j - 1) {
                ++stkpnt;
                stack_ref (1, stkpnt) = start;
                stack_ref (2, stkpnt) = j;
                ++stkpnt;
                stack_ref (1, stkpnt) = j + 1;
                stack_ref (2, stkpnt) = endd;
            } else {
                ++stkpnt;
                stack_ref (1, stkpnt) = j + 1;
                stack_ref (2, stkpnt) = endd;
                ++stkpnt;
                stack_ref (1, stkpnt) = start;
                stack_ref (2, stkpnt) = j;
            }
        } else {
            /* Sort into increasing order */
            i__ = start - 1;
            j = endd + 1;
L90:
L100:
            --j;
            if (d__[j] > dmnmx) {
                goto L100;
            }
L110:
            ++i__;
            if (d__[i__] < dmnmx) {
                goto L110;
            }
            if (i__ < j) {
                tmp = d__[i__];
                d__[i__] = d__[j];
                d__[j] = tmp;
                goto L90;
            }
            if (j - start > endd - j - 1) {
                ++stkpnt;
                stack_ref (1, stkpnt) = start;
                stack_ref (2, stkpnt) = j;
                ++stkpnt;
                stack_ref (1, stkpnt) = j + 1;
                stack_ref (2, stkpnt) = endd;
            } else {
                ++stkpnt;
                stack_ref (1, stkpnt) = j + 1;
                stack_ref (2, stkpnt) = endd;
                ++stkpnt;
                stack_ref (1, stkpnt) = start;
                stack_ref (2, stkpnt) = j;
            }
        }
    }
    if (stkpnt > 0) {
        goto L10;
    }
    return 0;
} /* NUMlapack_dlasrt */
#undef stack_ref
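/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dlasrt sorts a contiguous array in place; id = "I" selects increasing
    order, id = "D" decreasing. The variables below are hypothetical.

        double d [5] = { 3.0, 1.0, 2.0, 5.0, 4.0 };
        integer n = 5, info;
        NUMlapack_dlasrt ("I", &n, d, &info);
        // d is now { 1, 2, 3, 4, 5 } and info == 0

    Internally the routine is a quicksort with median-of-3 pivoting that falls back
    to insertion sort for subarrays of at most 20 elements, using the explicit
    stack[] above instead of recursion.
*/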
int NUMlapack_dlassq (integer *n, double *x, integer *incx, double *scale, double *sumsq) {
    /* System generated locals */
    integer i__1, i__2;
    double d__1;
    /* Local variables */
    static double absxi;
    static integer ix;
    --x;
    /* Function Body */
    if (*n > 0) {
        i__1 = (*n - 1) * *incx + 1;
        i__2 = *incx;
        for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
            if (x[ix] != 0.) {
                absxi = (d__1 = x[ix], fabs (d__1));
                if (*scale < absxi) {
                    /* Computing 2nd power */
                    d__1 = *scale / absxi;
                    *sumsq = *sumsq * (d__1 * d__1) + 1;
                    *scale = absxi;
                } else {
                    /* Computing 2nd power */
                    d__1 = absxi / *scale;
                    *sumsq += d__1 * d__1;
                }
            }
            /* L10: */
        }
    }
    return 0;
} /* NUMlapack_dlassq */
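/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dlassq maintains the pair (scale, sumsq) so that on return
    scale^2 * sumsq equals its old value plus the sum of squares of x; this is how
    DNRM2-style two-norms are computed without overflow or destructive underflow.
    Hypothetical variables:

        double x [3] = { 3.0e200, 4.0e200, 0.0 };
        integer n = 3, incx = 1;
        double scale = 0.0, sumsq = 1.0;
        NUMlapack_dlassq (&n, x, &incx, &scale, &sumsq);
        double nrm2 = scale * sqrt (sumsq);
        // 5.0e200, although the squares themselves would overflow
*/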
int NUMlapack_dlasv2 (double *f, double *g, double *h__, double *ssmin, double *ssmax, double *snr, double *csr,
    double *snl, double *csl) {
    /* Table of constant values */
    static double c_b3 = 2.;
    static double c_b4 = 1.;
    /* System generated locals */
    double d__1;
    /* Local variables */
    static integer pmax;
    static double temp;
    static integer swap;
    static double a, d__, l, m, r__, s, t, tsign, fa, ga, ha;
    static double ft, gt, ht, mm;
    static integer gasmal;
    static double tt, clt, crt, slt, srt;
    ft = *f;
    fa = fabs (ft);
    ht = *h__;
    ha = fabs (*h__);
    /* PMAX points to the maximum absolute element of matrix:
       PMAX = 1 if F largest in absolute values
       PMAX = 2 if G largest in absolute values
       PMAX = 3 if H largest in absolute values */
    pmax = 1;
    swap = ha > fa;
    if (swap) {
        pmax = 3;
        temp = ft;
        ft = ht;
        ht = temp;
        temp = fa;
        fa = ha;
        ha = temp;
        /* Now FA .ge. HA */
    }
    gt = *g;
    ga = fabs (gt);
    if (ga == 0.) {
        /* Diagonal matrix */
        *ssmin = ha;
        *ssmax = fa;
        clt = 1.;
        crt = 1.;
        slt = 0.;
        srt = 0.;
    } else {
        gasmal = TRUE;
        if (ga > fa) {
            pmax = 2;
            if (fa / ga < NUMblas_dlamch ("EPS")) {
                /* Case of very large GA */
                gasmal = FALSE;
                *ssmax = ga;
                if (ha > 1.) {
                    *ssmin = fa / (ga / ha);
                } else {
                    *ssmin = fa / ga * ha;
                }
                clt = 1.;
                slt = ht / gt;
                srt = 1.;
                crt = ft / gt;
            }
        }
        if (gasmal) {
            /* Normal case */
            d__ = fa - ha;
            if (d__ == fa) {
                /* Copes with infinite F or H */
                l = 1.;
            } else {
                l = d__ / fa;
            }
            /* Note that 0 .le. L .le. 1 */
            m = gt / ft;
            /* Note that abs(M) .le. 1/macheps */
            t = 2. - l;
            /* Note that T .ge. 1 */
            mm = m * m;
            tt = t * t;
            s = sqrt (tt + mm);
            /* Note that 1 .le. S .le. 1 + 1/macheps */
            if (l == 0.) {
                r__ = fabs (m);
            } else {
                r__ = sqrt (l * l + mm);
            }
            /* Note that 0 .le. R .le. 1 + 1/macheps */
            a = (s + r__) * .5;
            /* Note that 1 .le. A .le. 1 + abs(M) */
            *ssmin = ha / a;
            *ssmax = fa * a;
            if (mm == 0.) {
                /* Note that M is very tiny */
                if (l == 0.) {
                    t = d_sign (&c_b3, &ft) * d_sign (&c_b4, &gt);
                } else {
                    t = gt / d_sign (&d__, &ft) + m / t;
                }
            } else {
                t = (m / (s + t) + m / (r__ + l)) * (a + 1.);
            }
            l = sqrt (t * t + 4.);
            crt = 2. / l;
            srt = t / l;
            clt = (crt + srt * m) / a;
            slt = ht / ft * srt / a;
        }
    }
    if (swap) {
        *csl = srt;
        *snl = crt;
        *csr = slt;
        *snr = clt;
    } else {
        *csl = clt;
        *snl = slt;
        *csr = crt;
        *snr = srt;
    }
    /* Correct signs of SSMAX and SSMIN */
    if (pmax == 1) {
        tsign = d_sign (&c_b4, csr) * d_sign (&c_b4, csl) * d_sign (&c_b4, f);
    }
    if (pmax == 2) {
        tsign = d_sign (&c_b4, snr) * d_sign (&c_b4, csl) * d_sign (&c_b4, g);
    }
    if (pmax == 3) {
        tsign = d_sign (&c_b4, snr) * d_sign (&c_b4, snl) * d_sign (&c_b4, h__);
    }
    *ssmax = d_sign (ssmax, &tsign);
    d__1 = tsign * d_sign (&c_b4, f) * d_sign (&c_b4, h__);
    *ssmin = d_sign (ssmin, &d__1);
    return 0;
} /* NUMlapack_dlasv2 */
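/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dlasv2 computes the SVD of the 2-by-2 upper triangular matrix
    [ f g ; 0 h ]: on return |ssmax| >= |ssmin| and

        [  csl  snl ] [ f  g ] [ csr  -snr ]   [ ssmax    0   ]
        [ -snl  csl ] [ 0  h ] [ snr   csr ] = [   0    ssmin ]

    Hypothetical call:

        double f = 2.0, g = 1.0, h = 1.0;
        double ssmin, ssmax, snr, csr, snl, csl;
        NUMlapack_dlasv2 (&f, &g, &h, &ssmin, &ssmax, &snr, &csr, &snl, &csl);
*/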
int NUMlapack_dlaswp (integer *n, double *a, integer *lda, integer *k1, integer *k2, integer *ipiv, integer *incx) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
    /* Local variables */
    static double temp;
    static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    /* Function Body */
    if (*incx > 0) {
        ix0 = *k1;
        i1 = *k1;
        i2 = *k2;
        inc = 1;
    } else if (*incx < 0) {
        ix0 = (1 - *k2) * *incx + 1;
        i1 = *k2;
        i2 = *k1;
        inc = -1;
    } else {
        return 0;
    }
    n32 = *n / 32 << 5;
    if (n32 != 0) {
        i__1 = n32;
        for (j = 1; j <= i__1; j += 32) {
            ix = ix0;
            i__2 = i2;
            i__3 = inc;
            for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) {
                ip = ipiv[ix];
                if (ip != i__) {
                    i__4 = j + 31;
                    for (k = j; k <= i__4; ++k) {
                        temp = a_ref (i__, k);
                        a_ref (i__, k) = a_ref (ip, k);
                        a_ref (ip, k) = temp;
                        /* L10: */
                    }
                }
                ix += *incx;
                /* L20: */
            }
            /* L30: */
        }
    }
    if (n32 != *n) {
        ++n32;
        ix = ix0;
        i__1 = i2;
        i__3 = inc;
        for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
            ip = ipiv[ix];
            if (ip != i__) {
                i__2 = *n;
                for (k = n32; k <= i__2; ++k) {
                    temp = a_ref (i__, k);
                    a_ref (i__, k) = a_ref (ip, k);
                    a_ref (ip, k) = temp;
                    /* L40: */
                }
            }
            ix += *incx;
            /* L50: */
        }
    }
    return 0;
} /* NUMlapack_dlaswp */
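/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dlaswp applies the row interchanges recorded in ipiv (as produced by
    an LU factorization of DGETRF type) to rows k1..k2 of the column-major matrix a.
    Hypothetical variables:

        integer n = 3, lda = 3, k1 = 1, k2 = 3, incx = 1;
        integer ipiv [3] = { 2, 3, 3 };
        // row i was interchanged with row ipiv [i] during elimination
        NUMlapack_dlaswp (&n, a, &lda, &k1, &k2, ipiv, &incx);
*/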
#define w_ref(a_1,a_2) w[(a_2)*w_dim1 + a_1]
int NUMlapack_dlatrd (const char *uplo, integer *n, integer *nb, double *a, integer *lda, double *e, double *tau, double *w,
    integer *ldw) {
    /* Table of constant values */
    static double c_b5 = -1.;
    static double c_b6 = 1.;
    static integer c__1 = 1;
    static double c_b16 = 0.;
    /* System generated locals */
    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__;
    static double alpha;
    static integer iw;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --e;
    --tau;
    w_dim1 = *ldw;
    w_offset = 1 + w_dim1 * 1;
    w -= w_offset;
    /* Function Body */
    if (*n <= 0) {
        return 0;
    }
    if (lsame_ (uplo, "U")) {
        /* Reduce last NB columns of upper triangle */
        i__1 = *n - *nb + 1;
        for (i__ = *n; i__ >= i__1; --i__) {
            iw = i__ - *n + *nb;
            if (i__ < *n) {
                /* Update A(1:i,i) */
                i__2 = *n - i__;
                NUMblas_dgemv ("No transpose", &i__, &i__2, &c_b5, &a_ref (1, i__ + 1), lda, &w_ref (i__, iw + 1),
                    ldw, &c_b6, &a_ref (1, i__), &c__1);
                i__2 = *n - i__;
                NUMblas_dgemv ("No transpose", &i__, &i__2, &c_b5, &w_ref (1, iw + 1), ldw, &a_ref (i__, i__ + 1),
                    lda, &c_b6, &a_ref (1, i__), &c__1);
            }
            if (i__ > 1) {
                /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */
                i__2 = i__ - 1;
                NUMlapack_dlarfg (&i__2, &a_ref (i__ - 1, i__), &a_ref (1, i__), &c__1, &tau[i__ - 1]);
                e[i__ - 1] = a_ref (i__ - 1, i__);
                a_ref (i__ - 1, i__) = 1.;
                /* Compute W(1:i-1,i) */
                i__2 = i__ - 1;
                NUMblas_dsymv ("Upper", &i__2, &c_b6, &a[a_offset], lda, &a_ref (1, i__), &c__1, &c_b16,
                    &w_ref (1, iw), &c__1);
                if (i__ < *n) {
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b6, &w_ref (1, iw + 1), ldw, &a_ref (1, i__), &c__1,
                        &c_b16, &w_ref (i__ + 1, iw), &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (1, i__ + 1), lda, &w_ref (i__ + 1,
                        iw), &c__1, &c_b6, &w_ref (1, iw), &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b6, &a_ref (1, i__ + 1), lda, &a_ref (1, i__),
                        &c__1, &c_b16, &w_ref (i__ + 1, iw), &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &w_ref (1, iw + 1), ldw, &w_ref (i__ + 1,
                        iw), &c__1, &c_b6, &w_ref (1, iw), &c__1);
                }
                i__2 = i__ - 1;
                NUMblas_dscal (&i__2, &tau[i__ - 1], &w_ref (1, iw), &c__1);
                i__2 = i__ - 1;
                alpha = tau[i__ - 1] * -.5 * NUMblas_ddot (&i__2, &w_ref (1, iw), &c__1, &a_ref (1, i__), &c__1);
                i__2 = i__ - 1;
                NUMblas_daxpy (&i__2, &alpha, &a_ref (1, i__), &c__1, &w_ref (1, iw), &c__1);
            }
            /* L10: */
        }
    } else {
        /* Reduce first NB columns of lower triangle */
        i__1 = *nb;
        for (i__ = 1; i__ <= i__1; ++i__) {
            /* Update A(i:n,i) */
            i__2 = *n - i__ + 1;
            i__3 = i__ - 1;
            NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (i__, 1), lda, &w_ref (i__, 1), ldw, &c_b6,
                &a_ref (i__, i__), &c__1);
            i__2 = *n - i__ + 1;
            i__3 = i__ - 1;
            NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &w_ref (i__, 1), ldw, &a_ref (i__, 1), lda, &c_b6,
                &a_ref (i__, i__), &c__1);
            if (i__ < *n) {
                /* Generate elementary reflector H(i) to annihilate A(i+2:n,i)
                   Computing MIN */
                i__2 = i__ + 2;
                i__3 = *n - i__;
                NUMlapack_dlarfg (&i__3, &a_ref (i__ + 1, i__), &a_ref (MIN (i__2, *n), i__), &c__1, &tau[i__]);
                e[i__] = a_ref (i__ + 1, i__);
                a_ref (i__ + 1, i__) = 1.;
                /* Compute W(i+1:n,i) */
                i__2 = *n - i__;
                NUMblas_dsymv ("Lower", &i__2, &c_b6, &a_ref (i__ + 1, i__ + 1), lda, &a_ref (i__ + 1, i__), &c__1,
                    &c_b16, &w_ref (i__ + 1, i__), &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b6, &w_ref (i__ + 1, 1), ldw, &a_ref (i__ + 1, i__),
                    &c__1, &c_b16, &w_ref (1, i__), &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &a_ref (i__ + 1, 1), lda, &w_ref (1, i__), &c__1,
                    &c_b6, &w_ref (i__ + 1, i__), &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b6, &a_ref (i__ + 1, 1), lda, &a_ref (i__ + 1, i__),
                    &c__1, &c_b16, &w_ref (1, i__), &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b5, &w_ref (i__ + 1, 1), ldw, &w_ref (1, i__), &c__1,
                    &c_b6, &w_ref (i__ + 1, i__), &c__1);
                i__2 = *n - i__;
                NUMblas_dscal (&i__2, &tau[i__], &w_ref (i__ + 1, i__), &c__1);
                i__2 = *n - i__;
                alpha =
                    tau[i__] * -.5 * NUMblas_ddot (&i__2, &w_ref (i__ + 1, i__), &c__1, &a_ref (i__ + 1, i__),
                    &c__1);
                i__2 = *n - i__;
                NUMblas_daxpy (&i__2, &alpha, &a_ref (i__ + 1, i__), &c__1, &w_ref (i__ + 1, i__), &c__1);
            }
            /* L20: */
        }
    }
    return 0;
} /* NUMlapack_dlatrd */
#undef w_ref
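/*
    Note (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dlatrd reduces only nb rows/columns to tridiagonal form and returns
    the update matrix W; a DSYTRD-style driver applies it panel by panel and then
    updates the trailing submatrix with a rank-2k product. Rough sketch for the
    lower-triangular case (hypothetical index bookkeeping):

        // for each panel of nb columns starting at column i:
        //     n_rest = n - i + 1;
        //     NUMlapack_dlatrd ("L", &n_rest, &nb, &a_ref (i, i), lda, &e[i], &tau[i], w, &ldw);
        //     A(i+nb:n, i+nb:n) -= V * W' + W * V'   via NUMblas_dsyr2k
*/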
int NUMlapack_dorg2l (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    double d__1;
    /* Local variables */
    static integer i__, j, l;
    static integer ii;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0 || *n > *m) {
        *info = -2;
    } else if (*k < 0 || *k > *n) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORG2L", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 0) {
        return 0;
    }
    /* Initialise columns 1:n-k to columns of the unit matrix */
    i__1 = *n - *k;
    for (j = 1; j <= i__1; ++j) {
        i__2 = *m;
        for (l = 1; l <= i__2; ++l) {
            a_ref (l, j) = 0.;
            /* L10: */
        }
        a_ref (*m - *n + j, j) = 1.;
        /* L20: */
    }
    i__1 = *k;
    for (i__ = 1; i__ <= i__1; ++i__) {
        ii = *n - *k + i__;
        /* Apply H(i) to A(1:m-k+i,1:n-k+i) from the left */
        a_ref (*m - *n + ii, ii) = 1.;
        i__2 = *m - *n + ii;
        i__3 = ii - 1;
        NUMlapack_dlarf ("Left", &i__2, &i__3, &a_ref (1, ii), &c__1, &tau[i__], &a[a_offset], lda, &work[1]);
        i__2 = *m - *n + ii - 1;
        d__1 = -tau[i__];
        NUMblas_dscal (&i__2, &d__1, &a_ref (1, ii), &c__1);
        a_ref (*m - *n + ii, ii) = 1. - tau[i__];
        /* Set A(m-k+i+1:m,n-k+i) to zero */
        i__2 = *m;
        for (l = *m - *n + ii + 1; l <= i__2; ++l) {
            a_ref (l, ii) = 0.;
            /* L30: */
        }
        /* L40: */
    }
    return 0;
} /* NUMlapack_dorg2l */
int NUMlapack_dorg2r (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    double d__1;
    /* Local variables */
    static integer i__, j, l;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0 || *n > *m) {
        *info = -2;
    } else if (*k < 0 || *k > *n) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORG2R", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 0) {
        return 0;
    }
    /* Initialise columns k+1:n to columns of the unit matrix */
    i__1 = *n;
    for (j = *k + 1; j <= i__1; ++j) {
        i__2 = *m;
        for (l = 1; l <= i__2; ++l) {
            a_ref (l, j) = 0.;
            /* L10: */
        }
        a_ref (j, j) = 1.;
        /* L20: */
    }
    for (i__ = *k; i__ >= 1; --i__) {
        /* Apply H(i) to A(i:m,i:n) from the left */
        if (i__ < *n) {
            a_ref (i__, i__) = 1.;
            i__1 = *m - i__ + 1;
            i__2 = *n - i__;
            NUMlapack_dlarf ("Left", &i__1, &i__2, &a_ref (i__, i__), &c__1, &tau[i__], &a_ref (i__, i__ + 1),
                lda, &work[1]);
        }
        if (i__ < *m) {
            i__1 = *m - i__;
            d__1 = -tau[i__];
            NUMblas_dscal (&i__1, &d__1, &a_ref (i__ + 1, i__), &c__1);
        }
        a_ref (i__, i__) = 1. - tau[i__];
        /* Set A(1:i-1,i) to zero */
        i__1 = i__ - 1;
        for (l = 1; l <= i__1; ++l) {
            a_ref (l, i__) = 0.;
            /* L30: */
        }
        /* L40: */
    }
    return 0;
} /* NUMlapack_dorg2r */
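/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dorg2r is the unblocked kernel that overwrites the Householder
    vectors of a QR factorization with the first n columns of Q; the blocked
    driver NUMlapack_dorgqr below calls it for small or trailing blocks.
    Hypothetical sketch, assuming a DGEQR2-style factorization routine from
    elsewhere in this file:

        integer m = 4, n = 3, k = 3, lda = 4, info;
        NUMlapack_dgeqr2 (&m, &n, a, &lda, tau, work, &info);
        // a holds R and the Householder vectors, tau the scalar factors
        NUMlapack_dorg2r (&m, &n, &k, a, &lda, tau, work, &info);
        // a now holds the first n columns of Q
*/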
int NUMlapack_dorgbr (const char *vect, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work,
    integer *lwork, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, j;
    static integer iinfo;
    static integer wantq;
    static integer nb, mn;
    static integer lwkopt;
    static integer lquery;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    wantq = lsame_ (vect, "Q");
    mn = MIN (*m, *n);
    lquery = *lwork == -1;
    if (!wantq && !lsame_ (vect, "P")) {
        *info = -1;
    } else if (*m < 0) {
        *info = -2;
    } else if (*n < 0 || (wantq && (*n > *m || *n < MIN (*m, *k))) || (!wantq && (*m > *n || *m < MIN (*n, *k)))) {
        *info = -3;
    } else if (*k < 0) {
        *info = -4;
    } else if (*lda < MAX (1, *m)) {
        *info = -6;
    } else if (*lwork < MAX (1, mn) && !lquery) {
        *info = -9;
    }
    if (*info == 0) {
        if (wantq) {
            nb = NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, k, &c_n1, 6, 1);
        } else {
            nb = NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, k, &c_n1, 6, 1);
        }
        lwkopt = MAX (1, mn) * nb;
        work[1] = (double) lwkopt;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGBR", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*m == 0 || *n == 0) {
        work[1] = 1.;
        return 0;
    }
    if (wantq) {
        /* Form Q, determined by a call to DGEBRD to reduce an m-by-k matrix */
        if (*m >= *k) {
            /* If m >= k, assume m >= n >= k */
            NUMlapack_dorgqr (m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &iinfo);
        } else {
            /* If m < k, assume m = n
               Shift the vectors which define the elementary reflectors one
               column to the right, and set the first row and column of Q to
               those of the unit matrix */
            for (j = *m; j >= 2; --j) {
                a_ref (1, j) = 0.;
                i__1 = *m;
                for (i__ = j + 1; i__ <= i__1; ++i__) {
                    a_ref (i__, j) = a_ref (i__, j - 1);
                    /* L10: */
                }
                /* L20: */
            }
            a_ref (1, 1) = 1.;
            i__1 = *m;
            for (i__ = 2; i__ <= i__1; ++i__) {
                a_ref (i__, 1) = 0.;
                /* L30: */
            }
            if (*m > 1) {
                /* Form Q(2:m,2:m) */
                i__1 = *m - 1;
                i__2 = *m - 1;
                i__3 = *m - 1;
                NUMlapack_dorgqr (&i__1, &i__2, &i__3, &a_ref (2, 2), lda, &tau[1], &work[1], lwork, &iinfo);
            }
        }
    } else {
        /* Form P', determined by a call to DGEBRD to reduce a k-by-n matrix */
        if (*k < *n) {
            /* If k < n, assume k <= m <= n */
            NUMlapack_dorglq (m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &iinfo);
        } else {
            /* If k >= n, assume m = n
               Shift the vectors which define the elementary reflectors one
               row downward, and set the first row and column of P' to those
               of the unit matrix */
            a_ref (1, 1) = 1.;
            i__1 = *n;
            for (i__ = 2; i__ <= i__1; ++i__) {
                a_ref (i__, 1) = 0.;
                /* L40: */
            }
            i__1 = *n;
            for (j = 2; j <= i__1; ++j) {
                for (i__ = j - 1; i__ >= 2; --i__) {
                    a_ref (i__, j) = a_ref (i__ - 1, j);
                    /* L50: */
                }
                a_ref (1, j) = 0.;
                /* L60: */
            }
            if (*n > 1) {
                /* Form P'(2:n,2:n) */
                i__1 = *n - 1;
                i__2 = *n - 1;
                i__3 = *n - 1;
                NUMlapack_dorglq (&i__1, &i__2, &i__3, &a_ref (2, 2), lda, &tau[1], &work[1], lwork, &iinfo);
            }
        }
    }
    work[1] = (double) lwkopt;
    return 0;
} /* NUMlapack_dorgbr */
int NUMlapack_dorghr (integer *n, integer *ilo, integer *ihi, double *a, integer *lda, double *tau, double *work,
    integer *lwork, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    /* Local variables */
    static integer i__, j, iinfo, nb, nh;
    static integer lwkopt;
    static integer lquery;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    nh = *ihi - *ilo;
    lquery = *lwork == -1;
    if (*n < 0) {
        *info = -1;
    } else if (*ilo < 1 || *ilo > MAX (1, *n)) {
        *info = -2;
    } else if (*ihi < MIN (*ilo, *n) || *ihi > *n) {
        *info = -3;
    } else if (*lda < MAX (1, *n)) {
        *info = -5;
    } else if (*lwork < MAX (1, nh) && !lquery) {
        *info = -8;
    }
    if (*info == 0) {
        nb = NUMlapack_ilaenv (&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, 6, 1);
        lwkopt = MAX (1, nh) * nb;
        work[1] = (double) lwkopt;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGHR", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*n == 0) {
        work[1] = 1.;
        return 0;
    }
    /* Shift the vectors which define the elementary reflectors one column to
       the right, and set the first ilo and the last n-ihi rows and columns
       to those of the unit matrix */
    i__1 = *ilo + 1;
    for (j = *ihi; j >= i__1; --j) {
        i__2 = j - 1;
        for (i__ = 1; i__ <= i__2; ++i__) {
            a_ref (i__, j) = 0.;
            /* L10: */
        }
        i__2 = *ihi;
        for (i__ = j + 1; i__ <= i__2; ++i__) {
            a_ref (i__, j) = a_ref (i__, j - 1);
            /* L20: */
        }
        i__2 = *n;
        for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
            a_ref (i__, j) = 0.;
            /* L30: */
        }
        /* L40: */
    }
    i__1 = *ilo;
    for (j = 1; j <= i__1; ++j) {
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
            a_ref (i__, j) = 0.;
            /* L50: */
        }
        a_ref (j, j) = 1.;
        /* L60: */
    }
    i__1 = *n;
    for (j = *ihi + 1; j <= i__1; ++j) {
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
            a_ref (i__, j) = 0.;
            /* L70: */
        }
        a_ref (j, j) = 1.;
        /* L80: */
    }
    if (nh > 0) {
        /* Generate Q(ilo+1:ihi,ilo+1:ihi) */
        NUMlapack_dorgqr (&nh, &nh, &nh, &a_ref (*ilo + 1, *ilo + 1), lda, &tau[*ilo], &work[1], lwork,
            &iinfo);
    }
    work[1] = (double) lwkopt;
    return 0;
} /* NUMlapack_dorghr */
int NUMlapack_dorgl2 (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *info) {
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    double d__1;
    /* Local variables */
    static integer i__, j, l;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < *m) {
        *info = -2;
    } else if (*k < 0 || *k > *m) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGL2", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*m <= 0) {
        return 0;
    }
    if (*k < *m) {
        /* Initialise rows k+1:m to rows of the unit matrix */
        i__1 = *n;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (l = *k + 1; l <= i__2; ++l) {
                a_ref (l, j) = 0.;
                /* L10: */
            }
            if (j > *k && j <= *m) {
                a_ref (j, j) = 1.;
            }
            /* L20: */
        }
    }
    for (i__ = *k; i__ >= 1; --i__) {
        /* Apply H(i) to A(i:m,i:n) from the right */
        if (i__ < *n) {
            if (i__ < *m) {
                a_ref (i__, i__) = 1.;
                i__1 = *m - i__;
                i__2 = *n - i__ + 1;
                NUMlapack_dlarf ("Right", &i__1, &i__2, &a_ref (i__, i__), lda, &tau[i__], &a_ref (i__ + 1,
                    i__), lda, &work[1]);
            }
            i__1 = *n - i__;
            d__1 = -tau[i__];
            NUMblas_dscal (&i__1, &d__1, &a_ref (i__, i__ + 1), lda);
        }
        a_ref (i__, i__) = 1. - tau[i__];
        /* Set A(i,1:i-1) to zero */
        i__1 = i__ - 1;
        for (l = 1; l <= i__1; ++l) {
            a_ref (i__, l) = 0.;
            /* L30: */
        }
        /* L40: */
    }
    return 0;
} /* NUMlapack_dorgl2 */
int NUMlapack_dorglq (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *lwork,
    integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    static integer c__3 = 3;
    static integer c__2 = 2;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, j, l, nbmin, iinfo;
    static integer ib, nb, ki, kk;
    static integer nx;
    static integer ldwork, lwkopt;
    static integer lquery;
    static integer iws;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    nb = NUMlapack_ilaenv (&c__1, "DORGLQ", " ", m, n, k, &c_n1, 6, 1);
    lwkopt = MAX (1, *m) * nb;
    work[1] = (double) lwkopt;
    lquery = *lwork == -1;
    if (*m < 0) {
        *info = -1;
    } else if (*n < *m) {
        *info = -2;
    } else if (*k < 0 || *k > *m) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    } else if (*lwork < MAX (1, *m) && !lquery) {
        *info = -8;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGLQ", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*m <= 0) {
        work[1] = 1.;
        return 0;
    }
    nbmin = 2;
    nx = 0;
    iws = *m;
    if (nb > 1 && nb < *k) {
        /* Determine when to cross over from blocked to unblocked code.
           Computing MAX */
        i__1 = 0, i__2 = NUMlapack_ilaenv (&c__3, "DORGLQ", " ", m, n, k, &c_n1, 6, 1);
        nx = MAX (i__1, i__2);
        if (nx < *k) {
            /* Determine if workspace is large enough for blocked code. */
            ldwork = *m;
            iws = ldwork * nb;
            if (*lwork < iws) {
                /* Not enough workspace to use optimal NB: reduce NB and
                   determine the minimum value of NB. */
                nb = *lwork / ldwork;
                /* Computing MAX */
                i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DORGLQ", " ", m, n, k, &c_n1, 6, 1);
                nbmin = MAX (i__1, i__2);
            }
        }
    }
    if (nb >= nbmin && nb < *k && nx < *k) {
        /* Use blocked code after the last block. The first kk rows are
           handled by the block method. */
        ki = (*k - nx - 1) / nb * nb;
        /* Computing MIN */
        i__1 = *k, i__2 = ki + nb;
        kk = MIN (i__1, i__2);
        /* Set A(kk+1:m,1:kk) to zero. */
        i__1 = kk;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = kk + 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = 0.;
                /* L10: */
            }
            /* L20: */
        }
    } else {
        kk = 0;
    }
    /* Use unblocked code for the last or only block. */
    if (kk < *m) {
        i__1 = *m - kk;
        i__2 = *n - kk;
        i__3 = *k - kk;
        NUMlapack_dorgl2 (&i__1, &i__2, &i__3, &a_ref (kk + 1, kk + 1), lda, &tau[kk + 1], &work[1], &iinfo);
    }
    if (kk > 0) {
        /* Use blocked code */
        i__1 = -nb;
        for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
            /* Computing MIN */
            i__2 = nb, i__3 = *k - i__ + 1;
            ib = MIN (i__2, i__3);
            if (i__ + ib <= *m) {
                /* Form the triangular factor of the block reflector
                   H = H(i) H(i+1) . . . H(i+ib-1) */
                i__2 = *n - i__ + 1;
                NUMlapack_dlarft ("Forward", "Rowwise", &i__2, &ib, &a_ref (i__, i__), lda, &tau[i__], &work[1],
                    &ldwork);
                /* Apply H' to A(i+ib:m,i:n) from the right */
                i__2 = *m - i__ - ib + 1;
                i__3 = *n - i__ + 1;
                NUMlapack_dlarfb ("Right", "Transpose", "Forward", "Rowwise", &i__2, &i__3, &ib, &a_ref (i__,
                    i__), lda, &work[1], &ldwork, &a_ref (i__ + ib, i__), lda, &work[ib + 1], &ldwork);
            }
            /* Apply H' to columns i:n of current block */
            i__2 = *n - i__ + 1;
            NUMlapack_dorgl2 (&ib, &i__2, &ib, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
            /* Set columns 1:i-1 of current block to zero */
            i__2 = i__ - 1;
            for (j = 1; j <= i__2; ++j) {
                i__3 = i__ + ib - 1;
                for (l = i__; l <= i__3; ++l) {
                    a_ref (l, j) = 0.;
                    /* L30: */
                }
                /* L40: */
            }
            /* L50: */
        }
    }
    work[1] = (double) iws;
    return 0;
} /* NUMlapack_dorglq */
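/*
    Workspace-query idiom (illustrative comment only, not part of the f2c-translated
    source): like the other blocked generators in this file, NUMlapack_dorglq treats
    lwork == -1 as a query and stores the optimal workspace size in work[1] without
    computing anything. Hypothetical sketch:

        double query;
        integer lwork = -1, info;
        NUMlapack_dorglq (&m, &n, &k, a, &lda, tau, &query, &lwork, &info);
        lwork = (integer) query;
        // allocate lwork doubles for work, then make the real call
        NUMlapack_dorglq (&m, &n, &k, a, &lda, tau, work, &lwork, &info);
*/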
int NUMlapack_dorgql (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *lwork,
    integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    static integer c__3 = 3;
    static integer c__2 = 2;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
    /* Local variables */
    static integer i__, j, l, nbmin, iinfo;
    static integer ib, nb, kk;
    static integer nx;
    static integer ldwork, lwkopt;
    static integer lquery;
    static integer iws;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    nb = NUMlapack_ilaenv (&c__1, "DORGQL", " ", m, n, k, &c_n1, 6, 1);
    lwkopt = MAX (1, *n) * nb;
    work[1] = (double) lwkopt;
    lquery = *lwork == -1;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0 || *n > *m) {
        *info = -2;
    } else if (*k < 0 || *k > *n) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    } else if (*lwork < MAX (1, *n) && !lquery) {
        *info = -8;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGQL", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 0) {
        work[1] = 1.;
        return 0;
    }
    nbmin = 2;
    nx = 0;
    iws = *n;
    if (nb > 1 && nb < *k) {
        /* Determine when to cross over from blocked to unblocked code.
           Computing MAX */
        i__1 = 0, i__2 = NUMlapack_ilaenv (&c__3, "DORGQL", " ", m, n, k, &c_n1, 6, 1);
        nx = MAX (i__1, i__2);
        if (nx < *k) {
            /* Determine if workspace is large enough for blocked code. */
            ldwork = *n;
            iws = ldwork * nb;
            if (*lwork < iws) {
                /* Not enough workspace to use optimal NB: reduce NB and
                   determine the minimum value of NB. */
                nb = *lwork / ldwork;
                /* Computing MAX */
                i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DORGQL", " ", m, n, k, &c_n1, 6, 1);
                nbmin = MAX (i__1, i__2);
            }
        }
    }
    if (nb >= nbmin && nb < *k && nx < *k) {
        /* Use blocked code after the first block. The last kk columns are
           handled by the block method.
           Computing MIN */
        i__1 = *k, i__2 = (*k - nx + nb - 1) / nb * nb;
        kk = MIN (i__1, i__2);
        /* Set A(m-kk+1:m,1:n-kk) to zero. */
        i__1 = *n - kk;
        for (j = 1; j <= i__1; ++j) {
            i__2 = *m;
            for (i__ = *m - kk + 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = 0.;
                /* L10: */
            }
            /* L20: */
        }
    } else {
        kk = 0;
    }
    /* Use unblocked code for the first or only block. */
    i__1 = *m - kk;
    i__2 = *n - kk;
    i__3 = *k - kk;
    NUMlapack_dorg2l (&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
    if (kk > 0) {
        /* Use blocked code */
        i__1 = *k;
        i__2 = nb;
        for (i__ = *k - kk + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
            /* Computing MIN */
            i__3 = nb, i__4 = *k - i__ + 1;
            ib = MIN (i__3, i__4);
            if (*n - *k + i__ > 1) {
                /* Form the triangular factor of the block reflector
                   H = H(i+ib-1) . . . H(i+1) H(i) */
                i__3 = *m - *k + i__ + ib - 1;
                NUMlapack_dlarft ("Backward", "Columnwise", &i__3, &ib, &a_ref (1, *n - *k + i__), lda,
                    &tau[i__], &work[1], &ldwork);
                /* Apply H to A(1:m-k+i+ib-1,1:n-k+i-1) from the left */
                i__3 = *m - *k + i__ + ib - 1;
                i__4 = *n - *k + i__ - 1;
                NUMlapack_dlarfb ("Left", "No transpose", "Backward", "Columnwise", &i__3, &i__4, &ib,
                    &a_ref (1, *n - *k + i__), lda, &work[1], &ldwork, &a[a_offset], lda, &work[ib + 1],
                    &ldwork);
            }
            /* Apply H to rows 1:m-k+i+ib-1 of current block */
            i__3 = *m - *k + i__ + ib - 1;
            NUMlapack_dorg2l (&i__3, &ib, &ib, &a_ref (1, *n - *k + i__), lda, &tau[i__], &work[1], &iinfo);
            /* Set rows m-k+i+ib:m of current block to zero */
            i__3 = *n - *k + i__ + ib - 1;
            for (j = *n - *k + i__; j <= i__3; ++j) {
                i__4 = *m;
                for (l = *m - *k + i__ + ib; l <= i__4; ++l) {
                    a_ref (l, j) = 0.;
                    /* L30: */
                }
                /* L40: */
            }
            /* L50: */
        }
    }
    work[1] = (double) iws;
    return 0;
} /* NUMlapack_dorgql */
int NUMlapack_dorgqr (integer *m, integer *n, integer *k, double *a, integer *lda, double *tau, double *work, integer *lwork,
    integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    static integer c__3 = 3;
    static integer c__2 = 2;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, j, l, nbmin, iinfo;
    static integer ib, nb, ki, kk;
    static integer nx;
    static integer ldwork, lwkopt;
    static integer lquery;
    static integer iws;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    nb = NUMlapack_ilaenv (&c__1, "DORGQR", " ", m, n, k, &c_n1, 6, 1);
    lwkopt = MAX (1, *n) * nb;
    work[1] = (double) lwkopt;
    lquery = *lwork == -1;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0 || *n > *m) {
        *info = -2;
    } else if (*k < 0 || *k > *n) {
        *info = -3;
    } else if (*lda < MAX (1, *m)) {
        *info = -5;
    } else if (*lwork < MAX (1, *n) && !lquery) {
        *info = -8;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGQR", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 0) {
        work[1] = 1.;
        return 0;
    }
    nbmin = 2;
    nx = 0;
    iws = *n;
    if (nb > 1 && nb < *k) {
        /* Determine when to cross over from blocked to unblocked code.
           Computing MAX */
        i__1 = 0, i__2 = NUMlapack_ilaenv (&c__3, "DORGQR", " ", m, n, k, &c_n1, 6, 1);
        nx = MAX (i__1, i__2);
        if (nx < *k) {
            /* Determine if workspace is large enough for blocked code. */
            ldwork = *n;
            iws = ldwork * nb;
            if (*lwork < iws) {
                /* Not enough workspace to use optimal NB: reduce NB and
                   determine the minimum value of NB. */
                nb = *lwork / ldwork;
                /* Computing MAX */
                i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DORGQR", " ", m, n, k, &c_n1, 6, 1);
                nbmin = MAX (i__1, i__2);
            }
        }
    }
    if (nb >= nbmin && nb < *k && nx < *k) {
        /* Use blocked code after the last block. The first kk columns are
           handled by the block method. */
        ki = (*k - nx - 1) / nb * nb;
        /* Computing MIN */
        i__1 = *k, i__2 = ki + nb;
        kk = MIN (i__1, i__2);
        /* Set A(1:kk,kk+1:n) to zero. */
        i__1 = *n;
        for (j = kk + 1; j <= i__1; ++j) {
            i__2 = kk;
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = 0.;
                /* L10: */
            }
            /* L20: */
        }
    } else {
        kk = 0;
    }
    /* Use unblocked code for the last or only block. */
    if (kk < *n) {
        i__1 = *m - kk;
        i__2 = *n - kk;
        i__3 = *k - kk;
        NUMlapack_dorg2r (&i__1, &i__2, &i__3, &a_ref (kk + 1, kk + 1), lda, &tau[kk + 1], &work[1], &iinfo);
    }
    if (kk > 0) {
        /* Use blocked code */
        i__1 = -nb;
        for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
            /* Computing MIN */
            i__2 = nb, i__3 = *k - i__ + 1;
            ib = MIN (i__2, i__3);
            if (i__ + ib <= *n) {
                /* Form the triangular factor of the block reflector
                   H = H(i) H(i+1) . . . H(i+ib-1) */
                i__2 = *m - i__ + 1;
                NUMlapack_dlarft ("Forward", "Columnwise", &i__2, &ib, &a_ref (i__, i__), lda, &tau[i__],
                    &work[1], &ldwork);
                /* Apply H to A(i:m,i+ib:n) from the left */
                i__2 = *m - i__ + 1;
                i__3 = *n - i__ - ib + 1;
                NUMlapack_dlarfb ("Left", "No transpose", "Forward", "Columnwise", &i__2, &i__3, &ib,
                    &a_ref (i__, i__), lda, &work[1], &ldwork, &a_ref (i__, i__ + ib), lda, &work[ib + 1],
                    &ldwork);
            }
            /* Apply H to rows i:m of current block */
            i__2 = *m - i__ + 1;
            NUMlapack_dorg2r (&i__2, &ib, &ib, &a_ref (i__, i__), lda, &tau[i__], &work[1], &iinfo);
            /* Set rows 1:i-1 of current block to zero */
            i__2 = i__ + ib - 1;
            for (j = i__; j <= i__2; ++j) {
                i__3 = i__ - 1;
                for (l = 1; l <= i__3; ++l) {
                    a_ref (l, j) = 0.;
                    /* L30: */
                }
                /* L40: */
            }
            /* L50: */
        }
    }
    work[1] = (double) iws;
    return 0;
} /* NUMlapack_dorgqr */
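/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    after a DGEQRF-style factorization has stored Householder vectors in a and
    scalar factors in tau, NUMlapack_dorgqr overwrites a with the first n columns
    of the orthogonal factor Q. Hypothetical sketch, assuming the DGEQRF
    translation from elsewhere in this file:

        integer m = 6, n = 3, k = 3, lda = 6, lwork = 192, info;
        NUMlapack_dgeqrf (&m, &n, a, &lda, tau, work, &lwork, &info);
        NUMlapack_dorgqr (&m, &n, &k, a, &lda, tau, work, &lwork, &info);
        // the columns of a are now an orthonormal basis for the range of the original a
*/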
int NUMlapack_dorgtr (const char *uplo, integer *n, double *a, integer *lda, double *tau, double *work, integer *lwork,
    integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i__, j;
    static integer iinfo;
    static integer upper;
    static integer nb;
    static integer lwkopt;
    static integer lquery;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    upper = lsame_ (uplo, "U");
    if (!upper && !lsame_ (uplo, "L")) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < MAX (1, *n)) {
        *info = -4;
    } else { /* if(complicated condition) */
        /* Computing MAX */
        i__1 = 1, i__2 = *n - 1;
        if (*lwork < MAX (i__1, i__2) && !lquery) {
            *info = -7;
        }
    }
    if (*info == 0) {
        if (upper) {
            i__1 = *n - 1;
            i__2 = *n - 1;
            i__3 = *n - 1;
            nb = NUMlapack_ilaenv (&c__1, "DORGQL", " ", &i__1, &i__2, &i__3, &c_n1, 6, 1);
        } else {
            i__1 = *n - 1;
            i__2 = *n - 1;
            i__3 = *n - 1;
            nb = NUMlapack_ilaenv (&c__1, "DORGQR", " ", &i__1, &i__2, &i__3, &c_n1, 6, 1);
        }
        /* Computing MAX */
        i__1 = 1, i__2 = *n - 1;
        lwkopt = MAX (i__1, i__2) * nb;
        work[1] = (double) lwkopt;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORGTR", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    if (*n == 0) {
        work[1] = 1.;
        return 0;
    }
    if (upper) {
        /* Q was determined by a call to DSYTRD with UPLO = 'U'
           Shift the vectors which define the elementary reflectors one
           column to the left, and set the last row and column of Q to those
           of the unit matrix */
        i__1 = *n - 1;
        for (j = 1; j <= i__1; ++j) {
            i__2 = j - 1;
            for (i__ = 1; i__ <= i__2; ++i__) {
                a_ref (i__, j) = a_ref (i__, j + 1);
                /* L10: */
            }
            a_ref (*n, j) = 0.;
            /* L20: */
        }
        i__1 = *n - 1;
        for (i__ = 1; i__ <= i__1; ++i__) {
            a_ref (i__, *n) = 0.;
            /* L30: */
        }
        a_ref (*n, *n) = 1.;
        /* Generate Q(1:n-1,1:n-1) */
        i__1 = *n - 1;
        i__2 = *n - 1;
        i__3 = *n - 1;
        NUMlapack_dorgql (&i__1, &i__2, &i__3, &a[a_offset], lda, &tau[1], &work[1], lwork, &iinfo);
    } else {
        /* Q was determined by a call to DSYTRD with UPLO = 'L'.
           Shift the vectors which define the elementary reflectors one
           column to the right, and set the first row and column of Q to
           those of the unit matrix */
        for (j = *n; j >= 2; --j) {
            a_ref (1, j) = 0.;
            i__1 = *n;
            for (i__ = j + 1; i__ <= i__1; ++i__) {
                a_ref (i__, j) = a_ref (i__, j - 1);
                /* L40: */
            }
            /* L50: */
        }
        a_ref (1, 1) = 1.;
        i__1 = *n;
        for (i__ = 2; i__ <= i__1; ++i__) {
            a_ref (i__, 1) = 0.;
            /* L60: */
        }
        if (*n > 1) {
            /* Generate Q(2:n,2:n) */
            i__1 = *n - 1;
            i__2 = *n - 1;
            i__3 = *n - 1;
            NUMlapack_dorgqr (&i__1, &i__2, &i__3, &a_ref (2, 2), lda, &tau[1], &work[1], lwork, &iinfo);
        }
    }
    work[1] = (double) lwkopt;
    return 0;
} /* NUMlapack_dorgtr */
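/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dorgtr turns the Householder vectors left in a by a DSYTRD-style
    tridiagonal reduction into the explicit orthogonal matrix Q, so that
    Q' * A_original * Q is tridiagonal with diagonal d and off-diagonal e.
    Hypothetical sketch, assuming the DSYTRD translation from elsewhere in this file:

        NUMlapack_dsytrd ("U", &n, a, &lda, d, e, tau, work, &lwork, &info);
        NUMlapack_dorgtr ("U", &n, a, &lda, tau, work, &lwork, &info);
        // a now holds Q; d and e hold the tridiagonal matrix
*/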
int NUMlapack_dorm2r (const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau,
    double *c__, integer *ldc, double *work, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
    /* Local variables */
    static integer left;
    static integer i__;
    static integer i1, i2, i3, ic, jc, mi, ni, nq;
    static integer notran;
    static double aii;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1 * 1;
    c__ -= c_offset;
    --work;
    /* Function Body */
    *info = 0;
    left = lsame_ (side, "L");
    notran = lsame_ (trans, "N");
    /* NQ is the order of Q */
    if (left) {
        nq = *m;
    } else {
        nq = *n;
    }
    if (!left && !lsame_ (side, "R")) {
        *info = -1;
    } else if (!notran && !lsame_ (trans, "T")) {
        *info = -2;
    } else if (*m < 0) {
        *info = -3;
    } else if (*n < 0) {
        *info = -4;
    } else if (*k < 0 || *k > nq) {
        *info = -5;
    } else if (*lda < MAX (1, nq)) {
        *info = -7;
    } else if (*ldc < MAX (1, *m)) {
        *info = -10;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORM2R", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*m == 0 || *n == 0 || *k == 0) {
        return 0;
    }
    if ((left && !notran) || (!left && notran)) {
        i1 = 1;
        i2 = *k;
        i3 = 1;
    } else {
        i1 = *k;
        i2 = 1;
        i3 = -1;
    }
    if (left) {
        ni = *n;
        jc = 1;
    } else {
        mi = *m;
        ic = 1;
    }
    i__1 = i2;
    i__2 = i3;
    for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
        if (left) {
            /* H(i) is applied to C(i:m,1:n) */
            mi = *m - i__ + 1;
            ic = i__;
        } else {
            /* H(i) is applied to C(1:m,i:n) */
            ni = *n - i__ + 1;
            jc = i__;
        }
        /* Apply H(i) */
        aii = a_ref (i__, i__);
        a_ref (i__, i__) = 1.;
        NUMlapack_dlarf (side, &mi, &ni, &a_ref (i__, i__), &c__1, &tau[i__], &c___ref (ic, jc), ldc, &work[1]);
        a_ref (i__, i__) = aii;
        /* L10: */
    }
    return 0;
} /* NUMlapack_dorm2r */
int NUMlapack_dormbr (const char *vect, const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda,
    double *tau, double *c__, integer *ldc, double *work, integer *lwork, integer *info) {
    /* Table of constant values */
    static integer c__1 = 1;
    static integer c_n1 = -1;
    static integer c__2 = 2;
    /* System generated locals */
    const char *a__1[2];
    integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
    char ch__1[2];
    /* Local variables */
    static integer left;
    static integer iinfo, i1, i2, nb, mi, ni, nq, nw;
    static integer notran;
    static integer applyq;
    static char transt[1];
    static integer lwkopt;
    static integer lquery;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1 * 1;
    c__ -= c_offset;
    --work;
    /* Function Body */
    *info = 0;
    applyq = lsame_ (vect, "Q");
    left = lsame_ (side, "L");
    notran = lsame_ (trans, "N");
    lquery = *lwork == -1;
    /* NQ is the order of Q or P and NW is the minimum dimension of WORK */
    if (left) {
        nq = *m;
        nw = *n;
    } else {
        nq = *n;
        nw = *m;
    }
    if (!applyq && !lsame_ (vect, "P")) {
        *info = -1;
    } else if (!left && !lsame_ (side, "R")) {
        *info = -2;
    } else if (!notran && !lsame_ (trans, "T")) {
        *info = -3;
    } else if (*m < 0) {
        *info = -4;
    } else if (*n < 0) {
        *info = -5;
    } else if (*k < 0) {
        *info = -6;
    } else { /* if(complicated condition) */
        /* Computing MAX */
        i__1 = 1, i__2 = MIN (nq, *k);
        if ((applyq && *lda < MAX (1, nq)) || (!applyq && *lda < MAX (i__1, i__2))) {
            *info = -8;
        } else if (*ldc < MAX (1, *m)) {
            *info = -11;
        } else if (*lwork < MAX (1, nw) && !lquery) {
            *info = -13;
        }
    }
    if (*info == 0) {
        if (applyq) {
            if (left) {
                /* Writing concatenation */
                i__3[0] = 1, a__1[0] = side;
                i__3[1] = 1, a__1[1] = trans;
                s_cat (ch__1, a__1, i__3, &c__2, 2);
                i__1 = *m - 1;
                i__2 = *m - 1;
                nb = NUMlapack_ilaenv (&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, 6, 2);
            } else {
                /* Writing concatenation */
                i__3[0] = 1, a__1[0] = side;
                i__3[1] = 1, a__1[1] = trans;
                s_cat (ch__1, a__1, i__3, &c__2, 2);
                i__1 = *n - 1;
                i__2 = *n - 1;
                nb = NUMlapack_ilaenv (&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, 6, 2);
            }
        } else {
            if (left) {
                /* Writing concatenation */
                i__3[0] = 1, a__1[0] = side;
                i__3[1] = 1, a__1[1] = trans;
                s_cat (ch__1, a__1, i__3, &c__2, 2);
                i__1 = *m - 1;
                i__2 = *m - 1;
                nb = NUMlapack_ilaenv (&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, 6, 2);
            } else {
                /* Writing concatenation */
                i__3[0] = 1, a__1[0] = side;
                i__3[1] = 1, a__1[1] = trans;
                s_cat (ch__1, a__1, i__3, &c__2, 2);
                i__1 = *n - 1;
                i__2 = *n - 1;
                nb = NUMlapack_ilaenv (&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, 6, 2);
            }
        }
        lwkopt = MAX (1, nw) * nb;
        work[1] = (double) lwkopt;
    }
    if (*info != 0) {
        i__1 = - (*info);
        xerbla_ ("DORMBR", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }
    /* Quick return if possible */
    work[1] = 1.;
    if (*m == 0 || *n == 0) {
        return 0;
    }
    if (applyq) {
        /* Apply Q */
        if (nq >= *k) {
            /* Q was determined by a call to DGEBRD with nq >= k */
            NUMlapack_dormqr (side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[c_offset], ldc, &work[1],
                lwork, &iinfo);
        } else if (nq > 1) {
            /* Q was determined by a call to DGEBRD with nq < k */
            if (left) {
                mi = *m - 1;
                ni = *n;
                i1 = 2;
                i2 = 1;
            } else {
                mi = *m;
                ni = *n - 1;
                i1 = 1;
                i2 = 2;
            }
            i__1 = nq - 1;
            NUMlapack_dormqr (side, trans, &mi, &ni, &i__1, &a_ref (2, 1), lda, &tau[1], &c___ref (i1, i2), ldc,
                &work[1], lwork, &iinfo);
        }
    } else {
        /* Apply P */
        if (notran) {
            * (unsigned char *) transt = 'T';
        } else {
            * (unsigned char *) transt = 'N';
        }
        if (nq > *k) {
            /* P was determined by a call to DGEBRD with nq > k */
            NUMlapack_dormlq (side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[c_offset], ldc, &work[1],
                lwork, &iinfo);
        } else if (nq > 1) {
            /* P was determined by a call to DGEBRD with nq <= k */
            if (left) {
                mi = *m - 1;
                ni = *n;
                i1 = 2;
                i2 = 1;
            } else {
                mi = *m;
                ni = *n - 1;
                i1 = 1;
                i2 = 2;
            }
            i__1 = nq - 1;
            NUMlapack_dormlq (side, transt, &mi, &ni, &i__1, &a_ref (1, 2), lda, &tau[1], &c___ref (i1, i2),
                ldc, &work[1], lwork, &iinfo);
        }
    }
    work[1] = (double) lwkopt;
    return 0;
} /* NUMlapack_dormbr */
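/*
    Usage sketch (illustrative comment only, not part of the f2c-translated source):
    NUMlapack_dormbr applies the orthogonal factors Q or P' from a DGEBRD-style
    bidiagonal reduction to another matrix without forming them explicitly, which
    is how SVD drivers back-transform vectors. Hypothetical sketch: apply Q' from
    the left to an m-by-nrhs matrix c after reducing a (with m >= n), assuming the
    DGEBRD translation from elsewhere in this file:

        NUMlapack_dgebrd (&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
        NUMlapack_dormbr ("Q", "L", "T", &m, &nrhs, &n, a, &lda, tauq, c, &ldc, work, &lwork, &info);
*/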
int NUMlapack_dorml2 (const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau,
	double *c__, integer *ldc, double *work, integer *info) {
	/* System generated locals */
	integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
	/* Local variables */
	static integer left;
	static integer i__;
	static integer i1, i2, i3, ic, jc, mi, ni, nq;
	static integer notran;
	static double aii;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	*info = 0;
	left = lsame_ (side, "L");
	notran = lsame_ (trans, "N");
	/* NQ is the order of Q */
	if (left) {
		nq = *m;
	} else {
		nq = *n;
	}
	if (!left && !lsame_ (side, "R")) {
		*info = -1;
	} else if (!notran && !lsame_ (trans, "T")) {
		*info = -2;
	} else if (*m < 0) {
		*info = -3;
	} else if (*n < 0) {
		*info = -4;
	} else if (*k < 0 || *k > nq) {
		*info = -5;
	} else if (*lda < MAX (1, *k)) {
		*info = -7;
	} else if (*ldc < MAX (1, *m)) {
		*info = -10;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DORML2", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*m == 0 || *n == 0 || *k == 0) {
		return 0;
	}
	if (left && notran || !left && !notran) {
		i1 = 1;
		i2 = *k;
		i3 = 1;
	} else {
		i1 = *k;
		i2 = 1;
		i3 = -1;
	}
	if (left) {
		ni = *n;
		jc = 1;
	} else {
		mi = *m;
		ic = 1;
	}
	i__1 = i2;
	i__2 = i3;
	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
		if (left) {
			/* H(i) is applied to C(i:m,1:n) */
			mi = *m - i__ + 1;
			ic = i__;
		} else {
			/* H(i) is applied to C(1:m,i:n) */
			ni = *n - i__ + 1;
			jc = i__;
		}
		/* Apply H(i) */
		aii = a_ref (i__, i__);
		a_ref (i__, i__) = 1.;
		NUMlapack_dlarf (side, &mi, &ni, &a_ref (i__, i__), lda, &tau[i__], &c___ref (ic, jc), ldc, &work[1]);
		a_ref (i__, i__) = aii;
		/* L10: */
	}
	return 0;
} /* NUMlapack_dorml2 */
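/*
	Added usage sketch — not part of the original LAPACK translation and not
	called anywhere in this file. It shows the primitive that DORML2/DORMR2
	loop over: applying a single Householder reflector H = I - tau * v * v'
	with NUMlapack_dlarf (defined earlier in this file). With v = (1,1)' and
	tau = 2 / (v'*v) = 1, H equals [[0,-1],[-1,0]]; applied from the left to
	the identity it simply reproduces H. All values are made up for
	illustration.
*/
static void NUMlapack_example_singleReflector (void) {
	double v [2] = { 1.0, 1.0 };
	double tau = 1.0;
	double c [4] = { 1.0, 0.0, 0.0, 1.0 };   /* 2-by-2 identity, column-major */
	double work [2];   /* side "L" needs n doubles of workspace */
	integer m = 2, n = 2, incv = 1, ldc = 2;
	NUMlapack_dlarf ("Left", &m, &n, v, &incv, &tau, c, &ldc, work);
	/* c now holds [[0,-1],[-1,0]] in column-major order. */
}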
int NUMlapack_dormlq (const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau,
	double *c__, integer *ldc, double *work, integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__2 = 2;
	static integer c__65 = 65;
	/* System generated locals */
	char *a__1[2];
	integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5;
	char ch__1[2];
	/* Local variables */
	static integer left;
	static integer i__;
	static double t[4160] /* was [65][64] */ ;
	static integer nbmin, iinfo, i1, i2, i3;
	static integer ib, ic, jc, nb, mi, ni;
	static integer nq, nw;
	static integer notran;
	static integer ldwork;
	static char transt[1];
	static integer lwkopt;
	static integer lquery;
	static integer iws;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	*info = 0;
	left = lsame_ (side, "L");
	notran = lsame_ (trans, "N");
	lquery = *lwork == -1;
	/* NQ is the order of Q and NW is the minimum dimension of WORK */
	if (left) {
		nq = *m;
		nw = *n;
	} else {
		nq = *n;
		nw = *m;
	}
	if (!left && !lsame_ (side, "R")) {
		*info = -1;
	} else if (!notran && !lsame_ (trans, "T")) {
		*info = -2;
	} else if (*m < 0) {
		*info = -3;
	} else if (*n < 0) {
		*info = -4;
	} else if (*k < 0 || *k > nq) {
		*info = -5;
	} else if (*lda < MAX (1, *k)) {
		*info = -7;
	} else if (*ldc < MAX (1, *m)) {
		*info = -10;
	} else if (*lwork < MAX (1, nw) && !lquery) {
		*info = -12;
	}
	if (*info == 0) {
		/* Determine the block size. NB may be at most NBMAX, where NBMAX is
		   used to define the local array T.
		   Computing MIN Writing concatenation */
		i__3[0] = 1, a__1[0] = (char *) side;
		i__3[1] = 1, a__1[1] = (char *) trans;
		s_cat (ch__1, (const char **) a__1, i__3, &c__2, 2);
		i__1 = 64, i__2 = NUMlapack_ilaenv (&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, 6, 2);
		nb = MIN (i__1, i__2);
		lwkopt = MAX (1, nw) * nb;
		work[1] = (double) lwkopt;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DORMLQ", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	if (*m == 0 || *n == 0 || *k == 0) {
		work[1] = 1.;
		return 0;
	}
	nbmin = 2;
	ldwork = nw;
	if (nb > 1 && nb < *k) {
		iws = nw * nb;
		if (*lwork < iws) {
			nb = *lwork / ldwork;
			/* Computing MAX Writing concatenation */
			i__3[0] = 1, a__1[0] = (char *) side;
			i__3[1] = 1, a__1[1] = (char *) trans;
			s_cat (ch__1, (const char **) a__1, i__3, &c__2, 2);
			i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, 6, 2);
			nbmin = MAX (i__1, i__2);
		}
	} else {
		iws = nw;
	}
	if (nb < nbmin || nb >= *k) {
		/* Use unblocked code */
		NUMlapack_dorml2 (side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[c_offset], ldc, &work[1],
			&iinfo);
	} else {
		/* Use blocked code */
		if (left && notran || !left && !notran) {
			i1 = 1;
			i2 = *k;
			i3 = nb;
		} else {
			i1 = (*k - 1) / nb * nb + 1;
			i2 = 1;
			i3 = -nb;
		}
		if (left) {
			ni = *n;
			jc = 1;
		} else {
			mi = *m;
			ic = 1;
		}
		if (notran) {
			* (unsigned char *) transt = 'T';
		} else {
			* (unsigned char *) transt = 'N';
		}
		i__1 = i2;
		i__2 = i3;
		for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
			/* Computing MIN */
			i__4 = nb, i__5 = *k - i__ + 1;
			ib = MIN (i__4, i__5);
			/* Form the triangular factor of the block reflector
			   H = H(i) H(i+1) . . . H(i+ib-1) */
			i__4 = nq - i__ + 1;
			NUMlapack_dlarft ("Forward", "Rowwise", &i__4, &ib, &a_ref (i__, i__), lda, &tau[i__], t, &c__65);
			if (left) {
				/* H or H' is applied to C(i:m,1:n) */
				mi = *m - i__ + 1;
				ic = i__;
			} else {
				/* H or H' is applied to C(1:m,i:n) */
				ni = *n - i__ + 1;
				jc = i__;
			}
			/* Apply H or H' */
			NUMlapack_dlarfb (side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a_ref (i__, i__), lda, t,
				&c__65, &c___ref (ic, jc), ldc, &work[1], &ldwork);
			/* L10: */
		}
	}
	work[1] = (double) lwkopt;
	return 0;
} /* NUMlapack_dormlq */
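/*
	Added usage sketch — illustrative only, not in the original sources. The
	blocked routine above follows the standard LAPACK workspace-query
	convention: a call with *lwork == -1 does no work and only returns the
	optimal workspace size in work(1). The caller, the data layout of a/tau
	(an LQ factorization in DGELQF form), and the fixed 256-double buffer are
	assumptions of this sketch; n <= 256 is assumed so that the clamped lwork
	still meets the MAX (1, n) minimum.
*/
static int NUMlapack_example_dormlqQuery (integer m, integer n, integer k, double *a, integer lda,
	double *tau, double *c, integer ldc)
{
	double query, work [256];
	integer lwork = -1, info;
	/* Query call: only the optimal workspace size is computed. */
	NUMlapack_dormlq ("Left", "Transpose", &m, &n, &k, a, &lda, tau, c, &ldc, &query, &lwork, &info);
	lwork = (integer) query;
	if (lwork > 256) lwork = 256;   /* fall back to a smaller block size */
	/* Real call: C := Q' * C. */
	NUMlapack_dormlq ("Left", "Transpose", &m, &n, &k, a, &lda, tau, c, &ldc, work, &lwork, &info);
	return (int) info;
}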
int NUMlapack_dormqr (const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau,
	double *c__, integer *ldc, double *work, integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__2 = 2;
	static integer c__65 = 65;
	/* System generated locals */
	char *a__1[2];
	integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5;
	char ch__1[2];
	/* Local variables */
	static integer left;
	static integer i__;
	static double t[4160] /* was [65][64] */ ;
	static integer nbmin, iinfo, i1, i2, i3;
	static integer ib, ic, jc, nb, mi, ni;
	static integer nq, nw;
	static integer notran;
	static integer ldwork, lwkopt;
	static integer lquery;
	static integer iws;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	*info = 0;
	left = lsame_ (side, "L");
	notran = lsame_ (trans, "N");
	lquery = *lwork == -1;
	/* NQ is the order of Q and NW is the minimum dimension of WORK */
	if (left) {
		nq = *m;
		nw = *n;
	} else {
		nq = *n;
		nw = *m;
	}
	if (!left && !lsame_ (side, "R")) {
		*info = -1;
	} else if (!notran && !lsame_ (trans, "T")) {
		*info = -2;
	} else if (*m < 0) {
		*info = -3;
	} else if (*n < 0) {
		*info = -4;
	} else if (*k < 0 || *k > nq) {
		*info = -5;
	} else if (*lda < MAX (1, nq)) {
		*info = -7;
	} else if (*ldc < MAX (1, *m)) {
		*info = -10;
	} else if (*lwork < MAX (1, nw) && !lquery) {
		*info = -12;
	}
	if (*info == 0) {
		/* Determine the block size. NB may be at most NBMAX, where NBMAX is
		   used to define the local array T.
		   Computing MIN Writing concatenation */
		i__3[0] = 1, a__1[0] = (char *) side;
		i__3[1] = 1, a__1[1] = (char *) trans;
		s_cat (ch__1, (const char **) a__1, i__3, &c__2, 2);
		i__1 = 64, i__2 = NUMlapack_ilaenv (&c__1, "DORMQR", ch__1, m, n, k, &c_n1, 6, 2);
		nb = MIN (i__1, i__2);
		lwkopt = MAX (1, nw) * nb;
		work[1] = (double) lwkopt;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DORMQR", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	if (*m == 0 || *n == 0 || *k == 0) {
		work[1] = 1.;
		return 0;
	}
	nbmin = 2;
	ldwork = nw;
	if (nb > 1 && nb < *k) {
		iws = nw * nb;
		if (*lwork < iws) {
			nb = *lwork / ldwork;
			/* Computing MAX Writing concatenation */
			i__3[0] = 1, a__1[0] = (char *) side;
			i__3[1] = 1, a__1[1] = (char *) trans;
			s_cat (ch__1, (const char **) a__1, i__3, &c__2, 2);
			i__1 = 2, i__2 = NUMlapack_ilaenv (&c__2, "DORMQR", ch__1, m, n, k, &c_n1, 6, 2);
			nbmin = MAX (i__1, i__2);
		}
	} else {
		iws = nw;
	}
	if (nb < nbmin || nb >= *k) {
		/* Use unblocked code */
		NUMlapack_dorm2r (side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[c_offset], ldc, &work[1],
			&iinfo);
	} else {
		/* Use blocked code */
		if (left && !notran || !left && notran) {
			i1 = 1;
			i2 = *k;
			i3 = nb;
		} else {
			i1 = (*k - 1) / nb * nb + 1;
			i2 = 1;
			i3 = -nb;
		}
		if (left) {
			ni = *n;
			jc = 1;
		} else {
			mi = *m;
			ic = 1;
		}
		i__1 = i2;
		i__2 = i3;
		for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
			/* Computing MIN */
			i__4 = nb, i__5 = *k - i__ + 1;
			ib = MIN (i__4, i__5);
			/* Form the triangular factor of the block reflector
			   H = H(i) H(i+1) . . . H(i+ib-1) */
			i__4 = nq - i__ + 1;
			NUMlapack_dlarft ("Forward", "Columnwise", &i__4, &ib, &a_ref (i__, i__), lda, &tau[i__], t,
				&c__65);
			if (left) {
				/* H or H' is applied to C(i:m,1:n) */
				mi = *m - i__ + 1;
				ic = i__;
			} else {
				/* H or H' is applied to C(1:m,i:n) */
				ni = *n - i__ + 1;
				jc = i__;
			}
			/* Apply H or H' */
			NUMlapack_dlarfb (side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a_ref (i__, i__), lda, t,
				&c__65, &c___ref (ic, jc), ldc, &work[1], &ldwork);
			/* L10: */
		}
	}
	work[1] = (double) lwkopt;
	return 0;
} /* NUMlapack_dormqr */
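/*
	Added usage sketch — illustrative only. Given a (m x k, column-major,
	leading dimension lda) already holding a QR factorization in DGEQRF form
	(reflectors below the diagonal, scalar factors in tau), this applies Q'
	to an m-vector b, the first step of a least-squares solve. With n = 1 the
	minimum workspace MAX (1, n) = 1 suffices, so the routine falls through
	to the unblocked DORM2R path. The surrounding factorization is assumed,
	not shown.
*/
static int NUMlapack_example_applyQTransposed (integer m, integer k, double *a, integer lda,
	double *tau, double *b)
{
	double work [1];
	integer n = 1, lwork = 1, info;
	NUMlapack_dormqr ("Left", "Transpose", &m, &n, &k, a, &lda, tau, b, &m, work, &lwork, &info);
	return (int) info;   /* 0 on success */
}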
int NUMlapack_dormr2 (const char *side, const char *trans, integer *m, integer *n, integer *k, double *a, integer *lda, double *tau,
	double *c__, integer *ldc, double *work, integer *info) {
	/* System generated locals */
	integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
	/* Local variables */
	static integer left;
	static integer i__;
	static integer i1, i2, i3, mi, ni, nq;
	static integer notran;
	static double aii;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--tau;
	c_dim1 = *ldc;
	c_offset = 1 + c_dim1 * 1;
	c__ -= c_offset;
	--work;
	/* Function Body */
	*info = 0;
	left = lsame_ (side, "L");
	notran = lsame_ (trans, "N");
	/* NQ is the order of Q */
	if (left) {
		nq = *m;
	} else {
		nq = *n;
	}
	if (!left && !lsame_ (side, "R")) {
		*info = -1;
	} else if (!notran && !lsame_ (trans, "T")) {
		*info = -2;
	} else if (*m < 0) {
		*info = -3;
	} else if (*n < 0) {
		*info = -4;
	} else if (*k < 0 || *k > nq) {
		*info = -5;
	} else if (*lda < MAX (1, *k)) {
		*info = -7;
	} else if (*ldc < MAX (1, *m)) {
		*info = -10;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DORMR2", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*m == 0 || *n == 0 || *k == 0) {
		return 0;
	}
	if (left && !notran || !left && notran) {
		i1 = 1;
		i2 = *k;
		i3 = 1;
	} else {
		i1 = *k;
		i2 = 1;
		i3 = -1;
	}
	if (left) {
		ni = *n;
	} else {
		mi = *m;
	}
	i__1 = i2;
	i__2 = i3;
	for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
		if (left) {
			/* H(i) is applied to C(1:m-k+i,1:n) */
			mi = *m - *k + i__;
		} else {
			/* H(i) is applied to C(1:m,1:n-k+i) */
			ni = *n - *k + i__;
		}
		/* Apply H(i) */
		aii = a_ref (i__, nq - *k + i__);
		a_ref (i__, nq - *k + i__) = 1.;
		NUMlapack_dlarf (side, &mi, &ni, &a_ref (i__, 1), lda, &tau[i__], &c__[c_offset], ldc, &work[1]);
		a_ref (i__, nq - *k + i__) = aii;
		/* L10: */
	}
	return 0;
} /* NUMlapack_dormr2 */
int NUMlapack_dpotf2 (const char *uplo, integer *n, double *a, integer *lda, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static double c_b10 = -1.;
	static double c_b12 = 1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3;
	double d__1;
	/* Local variables */
	static integer j;
	static int upper;
	static double ajj;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	/* Function Body */
	*info = 0;
	upper = lsame_ (uplo, "U");
	if (!upper && !lsame_ (uplo, "L")) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *n)) {
		*info = -4;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DPOTF2", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		return 0;
	}
	if (upper) {
		/* Compute the Cholesky factorization A = U'*U. */
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			/* Compute U(J,J) and test for non-positive-definiteness. */
			i__2 = j - 1;
			ajj = a_ref (j, j) - NUMblas_ddot (&i__2, &a_ref (1, j), &c__1, &a_ref (1, j), &c__1);
			if (ajj <= 0.) {
				a_ref (j, j) = ajj;
				goto L30;
			}
			ajj = sqrt (ajj);
			a_ref (j, j) = ajj;
			/* Compute elements J+1:N of row J. */
			if (j < *n) {
				i__2 = j - 1;
				i__3 = *n - j;
				NUMblas_dgemv ("Transpose", &i__2, &i__3, &c_b10, &a_ref (1, j + 1), lda, &a_ref (1, j),
					&c__1, &c_b12, &a_ref (j, j + 1), lda);
				i__2 = *n - j;
				d__1 = 1. / ajj;
				NUMblas_dscal (&i__2, &d__1, &a_ref (j, j + 1), lda);
			}
			/* L10: */
		}
	} else {
		/* Compute the Cholesky factorization A = L*L'. */
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
			/* Compute L(J,J) and test for non-positive-definiteness. */
			i__2 = j - 1;
			ajj = a_ref (j, j) - NUMblas_ddot (&i__2, &a_ref (j, 1), lda, &a_ref (j, 1), lda);
			if (ajj <= 0.) {
				a_ref (j, j) = ajj;
				goto L30;
			}
			ajj = sqrt (ajj);
			a_ref (j, j) = ajj;
			/* Compute elements J+1:N of column J. */
			if (j < *n) {
				i__2 = *n - j;
				i__3 = j - 1;
				NUMblas_dgemv ("No transpose", &i__2, &i__3, &c_b10, &a_ref (j + 1, 1), lda, &a_ref (j, 1),
					lda, &c_b12, &a_ref (j + 1, j), &c__1);
				i__2 = *n - j;
				d__1 = 1. / ajj;
				NUMblas_dscal (&i__2, &d__1, &a_ref (j + 1, j), &c__1);
			}
			/* L20: */
		}
	}
	goto L40;
L30:
	*info = j;
L40:
	return 0;
} /* NUMlapack_dpotf2 */
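/*
	Added usage sketch — illustrative only. Unblocked Cholesky factorization
	of a small symmetric positive definite matrix in column-major storage; on
	success the lower triangle holds L with A = L * L'. For A = [[4,2],[2,3]]
	the factor is L = [[2,0],[1,sqrt(2)]].
*/
static int NUMlapack_example_cholesky2x2 (void) {
	double a [4] = { 4.0, 2.0, 2.0, 3.0 };   /* column-major [[4,2],[2,3]] */
	integer n = 2, lda = 2, info;
	NUMlapack_dpotf2 ("L", &n, a, &lda, &info);
	/* info == 0: a[0] = 2, a[1] = 1, a[3] = sqrt(2) now hold L.
	   info == j > 0: the leading minor of order j is not positive definite. */
	return (int) info;
}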
int NUMlapack_drscl (integer *n, double *sa, double *sx, integer *incx) {
	static double cden;
	static integer done;
	static double cnum, cden1, cnum1;
	static double bignum, smlnum, mul;
	--sx;
	/* Function Body */
	if (*n <= 0) {
		return 0;
	}
	/* Get machine parameters */
	smlnum = NUMblas_dlamch ("S");
	bignum = 1. / smlnum;
	NUMlapack_dlabad (&smlnum, &bignum);
	/* Initialize the denominator to SA and the numerator to 1. */
	cden = *sa;
	cnum = 1.;
L10:
	cden1 = cden * smlnum;
	cnum1 = cnum / bignum;
	if (fabs (cden1) > fabs (cnum) && cnum != 0.) {
		/* Pre-multiply X by SMLNUM if CDEN is large compared to CNUM. */
		mul = smlnum;
		done = FALSE;
		cden = cden1;
	} else if (fabs (cnum1) > fabs (cden)) {
		/* Pre-multiply X by BIGNUM if CDEN is small compared to CNUM. */
		mul = bignum;
		done = FALSE;
		cnum = cnum1;
	} else {
		/* Multiply X by CNUM / CDEN and return. */
		mul = cnum / cden;
		done = TRUE;
	}
	/* Scale the vector X by MUL */
	NUMblas_dscal (n, &mul, &sx[1], incx);
	if (!done) {
		goto L10;
	}
	return 0;
} /* NUMlapack_drscl */
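/*
	Added usage sketch — illustrative only. NUMlapack_drscl computes
	x := x / sa without intermediate overflow or underflow, which a plain
	multiplication by 1. / sa could hit when sa is near the underflow or
	overflow threshold.
*/
static void NUMlapack_example_reciprocalScale (void) {
	double x [3] = { 1.0, 2.0, 3.0 };
	double sa = 4.0;
	integer n = 3, incx = 1;
	NUMlapack_drscl (&n, &sa, x, &incx);   /* x becomes { 0.25, 0.5, 0.75 } */
}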
#define z___ref(a_1,a_2) z__[(a_2)*z_dim1 + a_1]
int NUMlapack_dsteqr (const char *compz, integer *n, double *d__, double *e, double *z__, integer *ldz, double *work,
	integer *info) {
	/* Table of constant values */
	static double c_b9 = 0.;
	static double c_b10 = 1.;
	static integer c__0 = 0;
	static integer c__1 = 1;
	static integer c__2 = 2;
	/* System generated locals */
	integer z_dim1, z_offset, i__1, i__2;
	double d__1, d__2;
	/* Local variables */
	static integer lend, jtot;
	static double b, c__, f, g;
	static integer i__, j, k, l, m;
	static double p, r__, s;
	static double anorm;
	static integer l1;
	static integer lendm1, lendp1;
	static integer ii;
	static integer mm, iscale;
	static double safmin;
	static double safmax;
	static integer lendsv;
	static double ssfmin;
	static integer nmaxit, icompz;
	static double ssfmax;
	static integer lm1, mm1, nm1;
	static double rt1, rt2, eps;
	static integer lsv;
	static double tst, eps2;
	--d__;
	--e;
	z_dim1 = *ldz;
	z_offset = 1 + z_dim1 * 1;
	z__ -= z_offset;
	--work;
	/* Function Body */
	*info = 0;
	if (lsame_ (compz, "N")) {
		icompz = 0;
	} else if (lsame_ (compz, "V")) {
		icompz = 1;
	} else if (lsame_ (compz, "I")) {
		icompz = 2;
	} else {
		icompz = -1;
	}
	if (icompz < 0) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*ldz < 1 || icompz > 0 && *ldz < MAX (1, *n)) {
		*info = -6;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DSTEQR", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		return 0;
	}
	if (*n == 1) {
		if (icompz == 2) {
			z___ref (1, 1) = 1.;
		}
		return 0;
	}
	/* Determine the unit roundoff and over/underflow thresholds. */
	eps = NUMblas_dlamch ("E");
	/* Computing 2nd power */
	d__1 = eps;
	eps2 = d__1 * d__1;
	safmin = NUMblas_dlamch ("S");
	safmax = 1. / safmin;
	ssfmax = sqrt (safmax) / 3.;
	ssfmin = sqrt (safmin) / eps2;
	/* Compute the eigenvalues and eigenvectors of the tridiagonal matrix. */
	if (icompz == 2) {
		NUMlapack_dlaset ("Full", n, n, &c_b9, &c_b10, &z__[z_offset], ldz);
	}
	nmaxit = *n * 30;
	jtot = 0;
	/* Determine where the matrix splits and choose QL or QR iteration for
	   each block, according to whether top or bottom diagonal element is
	   smaller. */
	l1 = 1;
	nm1 = *n - 1;
L10:
	if (l1 > *n) {
		goto L160;
	}
	if (l1 > 1) {
		e[l1 - 1] = 0.;
	}
	if (l1 <= nm1) {
		i__1 = nm1;
		for (m = l1; m <= i__1; ++m) {
			tst = (d__1 = e[m], fabs (d__1));
			if (tst == 0.) {
				goto L30;
			}
			if (tst <= sqrt ( (d__1 = d__[m], fabs (d__1))) * sqrt ( (d__2 = d__[m + 1], fabs (d__2))) * eps) {
				e[m] = 0.;
				goto L30;
			}
			/* L20: */
		}
	}
	m = *n;
L30:
	l = l1;
	lsv = l;
	lend = m;
	lendsv = lend;
	l1 = m + 1;
	if (lend == l) {
		goto L10;
	}
	/* Scale submatrix in rows and columns L to LEND */
	i__1 = lend - l + 1;
	anorm = NUMlapack_dlanst ("I", &i__1, &d__[l], &e[l]);
	iscale = 0;
	if (anorm == 0.) {
		goto L10;
	}
	if (anorm > ssfmax) {
		iscale = 1;
		i__1 = lend - l + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info);
		i__1 = lend - l;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info);
	} else if (anorm < ssfmin) {
		iscale = 2;
		i__1 = lend - l + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info);
		i__1 = lend - l;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info);
	}
	/* Choose between QL and QR iteration */
	if ( (d__1 = d__[lend], fabs (d__1)) < (d__2 = d__[l], fabs (d__2))) {
		lend = lsv;
		l = lendsv;
	}
	if (lend > l) {
		/* QL Iteration
		   Look for small subdiagonal element. */
L40:
		if (l != lend) {
			lendm1 = lend - 1;
			i__1 = lendm1;
			for (m = l; m <= i__1; ++m) {
				/* Computing 2nd power */
				d__2 = (d__1 = e[m], fabs (d__1));
				tst = d__2 * d__2;
				if (tst <= eps2 * (d__1 = d__[m], fabs (d__1)) * (d__2 = d__[m + 1], fabs (d__2)) + safmin) {
					goto L60;
				}
				/* L50: */
			}
		}
		m = lend;
L60:
		if (m < lend) {
			e[m] = 0.;
		}
		p = d__[l];
		if (m == l) {
			goto L80;
		}
		/* If remaining matrix is 2-by-2, use DLAE2 or DLAEV2 to compute its
		   eigensystem. */
		if (m == l + 1) {
			if (icompz > 0) {
				NUMlapack_dlaev2 (&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
				work[l] = c__;
				work[*n - 1 + l] = s;
				NUMlapack_dlasr ("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &z___ref (1, l), ldz);
			} else {
				NUMlapack_dlae2 (&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
			}
			d__[l] = rt1;
			d__[l + 1] = rt2;
			e[l] = 0.;
			l += 2;
			if (l <= lend) {
				goto L40;
			}
			goto L140;
		}
		if (jtot == nmaxit) {
			goto L140;
		}
		++jtot;
		/* Form shift. */
		g = (d__[l + 1] - p) / (e[l] * 2.);
		r__ = NUMlapack_dlapy2 (&g, &c_b10);
		g = d__[m] - p + e[l] / (g + d_sign (&r__, &g));
		s = 1.;
		c__ = 1.;
		p = 0.;
		/* Inner loop */
		mm1 = m - 1;
		i__1 = l;
		for (i__ = mm1; i__ >= i__1; --i__) {
			f = s * e[i__];
			b = c__ * e[i__];
			NUMlapack_dlartg (&g, &f, &c__, &s, &r__);
			if (i__ != m - 1) {
				e[i__ + 1] = r__;
			}
			g = d__[i__ + 1] - p;
			r__ = (d__[i__] - g) * s + c__ * 2. * b;
			p = s * r__;
			d__[i__ + 1] = g + p;
			g = c__ * r__ - b;
			/* If eigenvectors are desired, then save rotations. */
			if (icompz > 0) {
				work[i__] = c__;
				work[*n - 1 + i__] = -s;
			}
			/* L70: */
		}
		/* If eigenvectors are desired, then apply saved rotations. */
		if (icompz > 0) {
			mm = m - l + 1;
			NUMlapack_dlasr ("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z___ref (1, l), ldz);
		}
		d__[l] -= p;
		e[l] = g;
		goto L40;
		/* Eigenvalue found. */
L80:
		d__[l] = p;
		++l;
		if (l <= lend) {
			goto L40;
		}
		goto L140;
	} else {
		/* QR Iteration
		   Look for small superdiagonal element. */
L90:
		if (l != lend) {
			lendp1 = lend + 1;
			i__1 = lendp1;
			for (m = l; m >= i__1; --m) {
				/* Computing 2nd power */
				d__2 = (d__1 = e[m - 1], fabs (d__1));
				tst = d__2 * d__2;
				if (tst <= eps2 * (d__1 = d__[m], fabs (d__1)) * (d__2 = d__[m - 1], fabs (d__2)) + safmin) {
					goto L110;
				}
				/* L100: */
			}
		}
		m = lend;
L110:
		if (m > lend) {
			e[m - 1] = 0.;
		}
		p = d__[l];
		if (m == l) {
			goto L130;
		}
		/* If remaining matrix is 2-by-2, use DLAE2 or DLAEV2 to compute its
		   eigensystem. */
		if (m == l - 1) {
			if (icompz > 0) {
				NUMlapack_dlaev2 (&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s);
				work[m] = c__;
				work[*n - 1 + m] = s;
				NUMlapack_dlasr ("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &z___ref (1, l - 1),
					ldz);
			} else {
				NUMlapack_dlae2 (&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
			}
			d__[l - 1] = rt1;
			d__[l] = rt2;
			e[l - 1] = 0.;
			l += -2;
			if (l >= lend) {
				goto L90;
			}
			goto L140;
		}
		if (jtot == nmaxit) {
			goto L140;
		}
		++jtot;
		/* Form shift. */
		g = (d__[l - 1] - p) / (e[l - 1] * 2.);
		r__ = NUMlapack_dlapy2 (&g, &c_b10);
		g = d__[m] - p + e[l - 1] / (g + d_sign (&r__, &g));
		s = 1.;
		c__ = 1.;
		p = 0.;
		/* Inner loop */
		lm1 = l - 1;
		i__1 = lm1;
		for (i__ = m; i__ <= i__1; ++i__) {
			f = s * e[i__];
			b = c__ * e[i__];
			NUMlapack_dlartg (&g, &f, &c__, &s, &r__);
			if (i__ != m) {
				e[i__ - 1] = r__;
			}
			g = d__[i__] - p;
			r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
			p = s * r__;
			d__[i__] = g + p;
			g = c__ * r__ - b;
			/* If eigenvectors are desired, then save rotations. */
			if (icompz > 0) {
				work[i__] = c__;
				work[*n - 1 + i__] = s;
			}
			/* L120: */
		}
		/* If eigenvectors are desired, then apply saved rotations. */
		if (icompz > 0) {
			mm = l - m + 1;
			NUMlapack_dlasr ("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z___ref (1, m), ldz);
		}
		d__[l] -= p;
		e[lm1] = g;
		goto L90;
		/* Eigenvalue found. */
L130:
		d__[l] = p;
		--l;
		if (l >= lend) {
			goto L90;
		}
		goto L140;
	}
	/* Undo scaling if necessary */
L140:
	if (iscale == 1) {
		i__1 = lendsv - lsv + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info);
		i__1 = lendsv - lsv;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, info);
	} else if (iscale == 2) {
		i__1 = lendsv - lsv + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info);
		i__1 = lendsv - lsv;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, info);
	}
	/* Check for no convergence to an eigenvalue after a total of N*MAXIT
	   iterations. */
	if (jtot < nmaxit) {
		goto L10;
	}
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
		if (e[i__] != 0.) {
			++ (*info);
		}
		/* L150: */
	}
	goto L190;
	/* Order eigenvalues and eigenvectors. */
L160:
	if (icompz == 0) {
		/* Use Quick Sort */
		NUMlapack_dlasrt ("I", n, &d__[1], info);
	} else {
		/* Use Selection Sort to minimize swaps of eigenvectors */
		i__1 = *n;
		for (ii = 2; ii <= i__1; ++ii) {
			i__ = ii - 1;
			k = i__;
			p = d__[i__];
			i__2 = *n;
			for (j = ii; j <= i__2; ++j) {
				if (d__[j] < p) {
					k = j;
					p = d__[j];
				}
				/* L170: */
			}
			if (k != i__) {
				d__[k] = d__[i__];
				d__[i__] = p;
				NUMblas_dswap (n, &z___ref (1, i__), &c__1, &z___ref (1, k), &c__1);
			}
			/* L180: */
		}
	}
L190:
	return 0;
} /* NUMlapack_dsteqr */
#undef z___ref
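/*
	Added usage sketch — illustrative only. Eigenvalues and eigenvectors of a
	symmetric tridiagonal matrix via the implicit QL/QR routine above. With
	compz = "I" the routine initializes Z to the identity and accumulates all
	rotations, so Z returns the eigenvectors of the tridiagonal matrix itself;
	the workspace then needs MAX (1, 2 * n - 2) doubles.
*/
static int NUMlapack_example_tridiagonalEigen (void) {
	integer n = 3, ldz = 3, info;
	double d [3] = { 2.0, 2.0, 2.0 };   /* diagonal */
	double e [2] = { 1.0, 1.0 };        /* off-diagonal */
	double z [9];                       /* eigenvectors, column-major */
	double work [4];                    /* 2 * n - 2 */
	NUMlapack_dsteqr ("I", &n, d, e, z, &ldz, work, &info);
	/* d is returned in ascending order: 2 - sqrt(2), 2, 2 + sqrt(2). */
	return (int) info;
}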
int NUMlapack_dsterf (integer *n, double *d__, double *e, integer *info) {
	/* Table of constant values */
	static integer c__0 = 0;
	static integer c__1 = 1;
	static double c_b32 = 1.;
	/* System generated locals */
	integer i__1;
	double d__1, d__2, d__3;
	/* Local variables */
	static double oldc;
	static integer lend, jtot;
	static double c__;
	static integer i__, l, m;
	static double p, gamma, r__, s, alpha, sigma, anorm;
	static integer l1;
	static double bb;
	static integer iscale;
	static double oldgam, safmin;
	static double safmax;
	static integer lendsv;
	static double ssfmin;
	static integer nmaxit;
	static double ssfmax, rt1, rt2, eps, rte;
	static integer lsv;
	static double eps2;
	--e;
	--d__;
	/* Function Body */
	*info = 0;
	/* Quick return if possible */
	if (*n < 0) {
		*info = -1;
		i__1 = - (*info);
		xerbla_ ("DSTERF", &i__1);
		return 0;
	}
	if (*n <= 1) {
		return 0;
	}
	/* Determine the unit roundoff for this environment. */
	eps = NUMblas_dlamch ("E");
	/* Computing 2nd power */
	d__1 = eps;
	eps2 = d__1 * d__1;
	safmin = NUMblas_dlamch ("S");
	safmax = 1. / safmin;
	ssfmax = sqrt (safmax) / 3.;
	ssfmin = sqrt (safmin) / eps2;
	/* Compute the eigenvalues of the tridiagonal matrix. */
	nmaxit = *n * 30;
	sigma = 0.;
	jtot = 0;
	/* Determine where the matrix splits and choose QL or QR iteration for
	   each block, according to whether top or bottom diagonal element is
	   smaller. */
	l1 = 1;
L10:
	if (l1 > *n) {
		goto L170;
	}
	if (l1 > 1) {
		e[l1 - 1] = 0.;
	}
	i__1 = *n - 1;
	for (m = l1; m <= i__1; ++m) {
		if ( (d__3 = e[m], fabs (d__3)) <= sqrt ( (d__1 = d__[m], fabs (d__1))) * sqrt ( (d__2 =
			d__[m + 1], fabs (d__2))) * eps) {
			e[m] = 0.;
			goto L30;
		}
		/* L20: */
	}
	m = *n;
L30:
	l = l1;
	lsv = l;
	lend = m;
	lendsv = lend;
	l1 = m + 1;
	if (lend == l) {
		goto L10;
	}
	/* Scale submatrix in rows and columns L to LEND */
	i__1 = lend - l + 1;
	anorm = NUMlapack_dlanst ("I", &i__1, &d__[l], &e[l]);
	iscale = 0;
	if (anorm > ssfmax) {
		iscale = 1;
		i__1 = lend - l + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info);
		i__1 = lend - l;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info);
	} else if (anorm < ssfmin) {
		iscale = 2;
		i__1 = lend - l + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info);
		i__1 = lend - l;
		NUMlapack_dlascl ("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info);
	}
	i__1 = lend - 1;
	for (i__ = l; i__ <= i__1; ++i__) {
		/* Computing 2nd power */
		d__1 = e[i__];
		e[i__] = d__1 * d__1;
		/* L40: */
	}
	/* Choose between QL and QR iteration */
	if ( (d__1 = d__[lend], fabs (d__1)) < (d__2 = d__[l], fabs (d__2))) {
		lend = lsv;
		l = lendsv;
	}
	if (lend >= l) {
		/* QL Iteration
		   Look for small subdiagonal element. */
L50:
		if (l != lend) {
			i__1 = lend - 1;
			for (m = l; m <= i__1; ++m) {
				if ( (d__2 = e[m], fabs (d__2)) <= eps2 * (d__1 = d__[m] * d__[m + 1], fabs (d__1))) {
					goto L70;
				}
				/* L60: */
			}
		}
		m = lend;
L70:
		if (m < lend) {
			e[m] = 0.;
		}
		p = d__[l];
		if (m == l) {
			goto L90;
		}
		/* If remaining matrix is 2 by 2, use DLAE2 to compute its
		   eigenvalues. */
		if (m == l + 1) {
			rte = sqrt (e[l]);
			NUMlapack_dlae2 (&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
			d__[l] = rt1;
			d__[l + 1] = rt2;
			e[l] = 0.;
			l += 2;
			if (l <= lend) {
				goto L50;
			}
			goto L150;
		}
		if (jtot == nmaxit) {
			goto L150;
		}
		++jtot;
		/* Form shift. */
		rte = sqrt (e[l]);
		sigma = (d__[l + 1] - p) / (rte * 2.);
		r__ = NUMlapack_dlapy2 (&sigma, &c_b32);
		sigma = p - rte / (sigma + d_sign (&r__, &sigma));
		c__ = 1.;
		s = 0.;
		gamma = d__[m] - sigma;
		p = gamma * gamma;
		/* Inner loop */
		i__1 = l;
		for (i__ = m - 1; i__ >= i__1; --i__) {
			bb = e[i__];
			r__ = p + bb;
			if (i__ != m - 1) {
				e[i__ + 1] = s * r__;
			}
			oldc = c__;
			c__ = p / r__;
			s = bb / r__;
			oldgam = gamma;
			alpha = d__[i__];
			gamma = c__ * (alpha - sigma) - s * oldgam;
			d__[i__ + 1] = oldgam + (alpha - gamma);
			if (c__ != 0.) {
				p = gamma * gamma / c__;
			} else {
				p = oldc * bb;
			}
			/* L80: */
		}
		e[l] = s * p;
		d__[l] = sigma + gamma;
		goto L50;
		/* Eigenvalue found. */
L90:
		d__[l] = p;
		++l;
		if (l <= lend) {
			goto L50;
		}
		goto L150;
	} else {
		/* QR Iteration
		   Look for small superdiagonal element. */
L100:
		i__1 = lend + 1;
		for (m = l; m >= i__1; --m) {
			if ( (d__2 = e[m - 1], fabs (d__2)) <= eps2 * (d__1 = d__[m] * d__[m - 1], fabs (d__1))) {
				goto L120;
			}
			/* L110: */
		}
		m = lend;
L120:
		if (m > lend) {
			e[m - 1] = 0.;
		}
		p = d__[l];
		if (m == l) {
			goto L140;
		}
		/* If remaining matrix is 2 by 2, use DLAE2 to compute its
		   eigenvalues. */
		if (m == l - 1) {
			rte = sqrt (e[l - 1]);
			NUMlapack_dlae2 (&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
			d__[l] = rt1;
			d__[l - 1] = rt2;
			e[l - 1] = 0.;
			l += -2;
			if (l >= lend) {
				goto L100;
			}
			goto L150;
		}
		if (jtot == nmaxit) {
			goto L150;
		}
		++jtot;
		/* Form shift. */
		rte = sqrt (e[l - 1]);
		sigma = (d__[l - 1] - p) / (rte * 2.);
		r__ = NUMlapack_dlapy2 (&sigma, &c_b32);
		sigma = p - rte / (sigma + d_sign (&r__, &sigma));
		c__ = 1.;
		s = 0.;
		gamma = d__[m] - sigma;
		p = gamma * gamma;
		/* Inner loop */
		i__1 = l - 1;
		for (i__ = m; i__ <= i__1; ++i__) {
			bb = e[i__];
			r__ = p + bb;
			if (i__ != m) {
				e[i__ - 1] = s * r__;
			}
			oldc = c__;
			c__ = p / r__;
			s = bb / r__;
			oldgam = gamma;
			alpha = d__[i__ + 1];
			gamma = c__ * (alpha - sigma) - s * oldgam;
			d__[i__] = oldgam + (alpha - gamma);
			if (c__ != 0.) {
				p = gamma * gamma / c__;
			} else {
				p = oldc * bb;
			}
			/* L130: */
		}
		e[l - 1] = s * p;
		d__[l] = sigma + gamma;
		goto L100;
		/* Eigenvalue found. */
L140:
		d__[l] = p;
		--l;
		if (l >= lend) {
			goto L100;
		}
		goto L150;
	}
	/* Undo scaling if necessary */
L150:
	if (iscale == 1) {
		i__1 = lendsv - lsv + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info);
	}
	if (iscale == 2) {
		i__1 = lendsv - lsv + 1;
		NUMlapack_dlascl ("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info);
	}
	/* Check for no convergence to an eigenvalue after a total of N*MAXIT
	   iterations. */
	if (jtot < nmaxit) {
		goto L10;
	}
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
		if (e[i__] != 0.) {
			++ (*info);
		}
		/* L160: */
	}
	goto L180;
	/* Sort eigenvalues in increasing order. */
L170:
	NUMlapack_dlasrt ("I", n, &d__[1], info);
L180:
	return 0;
} /* NUMlapack_dsterf */
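/*
	Added usage sketch — illustrative only. When only eigenvalues are wanted,
	the root-free variant above needs no Z matrix and no workspace at all;
	d and e are overwritten, d with the eigenvalues in ascending order.
*/
static int NUMlapack_example_tridiagonalEigenvaluesOnly (void) {
	integer n = 3, info;
	double d [3] = { 2.0, 2.0, 2.0 };
	double e [2] = { 1.0, 1.0 };
	NUMlapack_dsterf (&n, d, e, &info);   /* d := { 2 - sqrt(2), 2, 2 + sqrt(2) } */
	return (int) info;
}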
int NUMlapack_dsyev (const char *jobz, const char *uplo, integer *n, double *a, integer *lda, double *w, double *work,
	integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__0 = 0;
	static double c_b17 = 1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2;
	double d__1;
	/* Local variables */
	static integer inde;
	static double anrm;
	static integer imax;
	static double rmin, rmax;
	static integer lopt;
	static double sigma;
	static integer iinfo;
	static integer lower, wantz;
	static integer nb;
	static integer iscale;
	static double safmin;
	static double bignum;
	static integer indtau;
	static integer indwrk;
	static integer llwork;
	static double smlnum;
	static integer lwkopt;
	static integer lquery;
	static double eps;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--w;
	--work;
	/* Function Body */
	wantz = lsame_ (jobz, "V");
	lower = lsame_ (uplo, "L");
	lquery = *lwork == -1;
	*info = 0;
	if (! (wantz || lsame_ (jobz, "N"))) {
		*info = -1;
	} else if (! (lower || lsame_ (uplo, "U"))) {
		*info = -2;
	} else if (*n < 0) {
		*info = -3;
	} else if (*lda < MAX (1, *n)) {
		*info = -5;
	} else { /* if(complicated condition) */
		/* Computing MAX */
		i__1 = 1, i__2 = *n * 3 - 1;
		if (*lwork < MAX (i__1, i__2) && !lquery) {
			*info = -8;
		}
	}
	if (*info == 0) {
		nb = NUMlapack_ilaenv (&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, 6, 1);
		/* Computing MAX */
		i__1 = 1, i__2 = (nb + 2) * *n;
		lwkopt = MAX (i__1, i__2);
		work[1] = (double) lwkopt;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DSYEV ", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		work[1] = 1.;
		return 0;
	}
	if (*n == 1) {
		w[1] = a_ref (1, 1);
		work[1] = 3.;
		if (wantz) {
			a_ref (1, 1) = 1.;
		}
		return 0;
	}
	/* Get machine constants. */
	safmin = NUMblas_dlamch ("Safe minimum");
	eps = NUMblas_dlamch ("Precision");
	smlnum = safmin / eps;
	bignum = 1. / smlnum;
	rmin = sqrt (smlnum);
	rmax = sqrt (bignum);
	/* Scale matrix to allowable range, if necessary. */
	anrm = NUMlapack_dlansy ("M", uplo, n, &a[a_offset], lda, &work[1]);
	iscale = 0;
	if (anrm > 0. && anrm < rmin) {
		iscale = 1;
		sigma = rmin / anrm;
	} else if (anrm > rmax) {
		iscale = 1;
		sigma = rmax / anrm;
	}
	if (iscale == 1) {
		NUMlapack_dlascl (uplo, &c__0, &c__0, &c_b17, &sigma, n, n, &a[a_offset], lda, info);
	}
	/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
	inde = 1;
	indtau = inde + *n;
	indwrk = indtau + *n;
	llwork = *lwork - indwrk + 1;
	NUMlapack_dsytrd (uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &work[indwrk], &llwork,
		&iinfo);
	lopt = (integer) ( (*n << 1) + work[indwrk]);
	/* For eigenvalues only, call DSTERF. For eigenvectors, first call
	   DORGTR to generate the orthogonal matrix, then call DSTEQR. */
	if (!wantz) {
		NUMlapack_dsterf (n, &w[1], &work[inde], info);
	} else {
		NUMlapack_dorgtr (uplo, n, &a[a_offset], lda, &work[indtau], &work[indwrk], &llwork, &iinfo);
		NUMlapack_dsteqr (jobz, n, &w[1], &work[inde], &a[a_offset], lda, &work[indtau], info);
	}
	/* If matrix was scaled, then rescale eigenvalues appropriately. */
	if (iscale == 1) {
		if (*info == 0) {
			imax = *n;
		} else {
			imax = *info - 1;
		}
		d__1 = 1. / sigma;
		NUMblas_dscal (&imax, &d__1, &w[1], &c__1);
	}
	/* Set WORK(1) to optimal workspace size. */
	work[1] = (double) lwkopt;
	return 0;
} /* NUMlapack_dsyev */
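/*
	Added usage sketch — illustrative only. The standard two-call pattern for
	the driver above: query the optimal workspace with *lwork == -1, then
	solve. A fixed buffer stands in for a heap allocation to keep the sketch
	self-contained; the minimum 3 * n - 1 = 5 doubles always fits in it.
*/
static int NUMlapack_example_symmetricEigen (void) {
	double a [4] = { 4.0, 2.0, 2.0, 3.0 };   /* symmetric, column-major */
	double w [2], query, work [64];
	integer n = 2, lda = 2, lwork = -1, info;
	NUMlapack_dsyev ("V", "L", &n, a, &lda, w, &query, &lwork, &info);
	lwork = (integer) query;
	if (lwork > 64) lwork = 64;   /* still >= the 3 * n - 1 minimum here */
	NUMlapack_dsyev ("V", "L", &n, a, &lda, w, work, &lwork, &info);
	/* w holds the eigenvalues in ascending order; the columns of a hold the
	   corresponding orthonormal eigenvectors. */
	return (int) info;
}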
int NUMlapack_dsytd2 (const char *uplo, integer *n, double *a, integer *lda, double *d__, double *e, double *tau,
	integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static double c_b8 = 0.;
	static double c_b14 = -1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3;
	/* Local variables */
	static double taui;
	static integer i__;
	static double alpha;
	static integer upper;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--d__;
	--e;
	--tau;
	/* Function Body */
	*info = 0;
	upper = lsame_ (uplo, "U");
	if (!upper && !lsame_ (uplo, "L")) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *n)) {
		*info = -4;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DSYTD2", &i__1);
		return 0;
	}
	/* Quick return if possible */
	if (*n <= 0) {
		return 0;
	}
	if (upper) {
		/* Reduce the upper triangle of A */
		for (i__ = *n - 1; i__ >= 1; --i__) {
			/* Generate elementary reflector H(i) = I - tau * v * v' to
			   annihilate A(1:i-1,i+1) */
			NUMlapack_dlarfg (&i__, &a_ref (i__, i__ + 1), &a_ref (1, i__ + 1), &c__1, &taui);
			e[i__] = a_ref (i__, i__ + 1);
			if (taui != 0.) {
				/* Apply H(i) from both sides to A(1:i,1:i) */
				a_ref (i__, i__ + 1) = 1.;
				/* Compute x := tau * A * v storing x in TAU(1:i) */
				NUMblas_dsymv (uplo, &i__, &taui, &a[a_offset], lda, &a_ref (1, i__ + 1), &c__1, &c_b8, &tau[1],
					&c__1);
				/* Compute w := x - 1/2 * tau * (x'*v) * v */
				alpha = taui * -.5 * NUMblas_ddot (&i__, &tau[1], &c__1, &a_ref (1, i__ + 1), &c__1);
				NUMblas_daxpy (&i__, &alpha, &a_ref (1, i__ + 1), &c__1, &tau[1], &c__1);
				/* Apply the transformation as a rank-2 update:
				   A := A - v * w' - w * v' */
				NUMblas_dsyr2 (uplo, &i__, &c_b14, &a_ref (1, i__ + 1), &c__1, &tau[1], &c__1, &a[a_offset], lda);
				a_ref (i__, i__ + 1) = e[i__];
			}
			d__[i__ + 1] = a_ref (i__ + 1, i__ + 1);
			tau[i__] = taui;
			/* L10: */
		}
		d__[1] = a_ref (1, 1);
	} else {
		/* Reduce the lower triangle of A */
		i__1 = *n - 1;
		for (i__ = 1; i__ <= i__1; ++i__) {
			/* Generate elementary reflector H(i) = I - tau * v * v' to
			   annihilate A(i+2:n,i)
			   Computing MIN */
			i__2 = i__ + 2;
			i__3 = *n - i__;
			NUMlapack_dlarfg (&i__3, &a_ref (i__ + 1, i__), &a_ref (MIN (i__2, *n), i__), &c__1, &taui);
			e[i__] = a_ref (i__ + 1, i__);
			if (taui != 0.) {
				/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
				a_ref (i__ + 1, i__) = 1.;
				/* Compute x := tau * A * v storing y in TAU(i:n-1) */
				i__2 = *n - i__;
				NUMblas_dsymv (uplo, &i__2, &taui, &a_ref (i__ + 1, i__ + 1), lda, &a_ref (i__ + 1, i__), &c__1,
					&c_b8, &tau[i__], &c__1);
				/* Compute w := x - 1/2 * tau * (x'*v) * v */
				i__2 = *n - i__;
				alpha = taui * -.5 * NUMblas_ddot (&i__2, &tau[i__], &c__1, &a_ref (i__ + 1, i__), &c__1);
				i__2 = *n - i__;
				NUMblas_daxpy (&i__2, &alpha, &a_ref (i__ + 1, i__), &c__1, &tau[i__], &c__1);
				/* Apply the transformation as a rank-2 update:
				   A := A - v * w' - w * v' */
				i__2 = *n - i__;
				NUMblas_dsyr2 (uplo, &i__2, &c_b14, &a_ref (i__ + 1, i__), &c__1, &tau[i__], &c__1, &a_ref (i__ + 1,
					i__ + 1), lda);
				a_ref (i__ + 1, i__) = e[i__];
			}
			d__[i__] = a_ref (i__, i__);
			tau[i__] = taui;
			/* L20: */
		}
		d__[*n] = a_ref (*n, *n);
	}
	return 0;
} /* NUMlapack_dsytd2 */
int NUMlapack_dsytrd (const char *uplo, integer *n, double *a, integer *lda, double *d__, double *e, double *tau,
	double *work, integer *lwork, integer *info) {
	/* Table of constant values */
	static integer c__1 = 1;
	static integer c_n1 = -1;
	static integer c__3 = 3;
	static integer c__2 = 2;
	static double c_b22 = -1.;
	static double c_b23 = 1.;
	/* System generated locals */
	integer a_dim1, a_offset, i__1, i__2, i__3;
	/* Local variables */
	static integer i__, j;
	static integer nbmin, iinfo;
	static integer upper;
	static integer nb, kk, nx;
	static integer ldwork, lwkopt;
	static integer lquery;
	static integer iws;
	a_dim1 = *lda;
	a_offset = 1 + a_dim1 * 1;
	a -= a_offset;
	--d__;
	--e;
	--tau;
	--work;
	/* Function Body */
	*info = 0;
	upper = lsame_ (uplo, "U");
	lquery = *lwork == -1;
	if (!upper && !lsame_ (uplo, "L")) {
		*info = -1;
	} else if (*n < 0) {
		*info = -2;
	} else if (*lda < MAX (1, *n)) {
		*info = -4;
	} else if (*lwork < 1 && !lquery) {
		*info = -9;
	}
	if (*info == 0) {
		/* Determine the block size. */
		nb = NUMlapack_ilaenv (&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, 6, 1);
		lwkopt = *n * nb;
		work[1] = (double) lwkopt;
	}
	if (*info != 0) {
		i__1 = - (*info);
		xerbla_ ("DSYTRD", &i__1);
		return 0;
	} else if (lquery) {
		return 0;
	}
	/* Quick return if possible */
	if (*n == 0) {
		work[1] = 1.;
		return 0;
	}
	nx = *n;
	iws = 1;
	if (nb > 1 && nb < *n) {
		/* Determine when to cross over from blocked to unblocked code (last
		   block is always handled by unblocked code).
		   Computing MAX */
		i__1 = nb, i__2 = NUMlapack_ilaenv (&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, 6, 1);
		nx = MAX (i__1, i__2);
		if (nx < *n) {
			/* Determine if workspace is large enough for blocked code. */
			ldwork = *n;
			iws = ldwork * nb;
			if (*lwork < iws) {
				/* Not enough workspace to use optimal NB: determine the
				   minimum value of NB, and reduce NB or force use of
				   unblocked code by setting NX = N.
				   Computing MAX */
				i__1 = *lwork / ldwork;
				nb = MAX (i__1, 1);
				nbmin = NUMlapack_ilaenv (&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, 6, 1);
				if (nb < nbmin) {
					nx = *n;
				}
			}
		} else {
			nx = *n;
		}
	} else {
		nb = 1;
	}
	if (upper) {
		/* Reduce the upper triangle of A. Columns 1:kk are handled by the
		   unblocked method. */
		kk = *n - (*n - nx + nb - 1) / nb * nb;
		i__1 = kk + 1;
		i__2 = -nb;
		for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
			/* Reduce columns i:i+nb-1 to tridiagonal form and form the
			   matrix W which is needed to update the unreduced part of the
			   matrix */
			i__3 = i__ + nb - 1;
			NUMlapack_dlatrd (uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &work[1], &ldwork);
			/* Update the unreduced submatrix A(1:i-1,1:i-1), using an update
			   of the form: A := A - V*W' - W*V' */
			i__3 = i__ - 1;
			NUMblas_dsyr2k (uplo, "No transpose", &i__3, &nb, &c_b22, &a_ref (1, i__), lda, &work[1], &ldwork,
				&c_b23, &a[a_offset], lda);
			/* Copy superdiagonal elements back into A, and diagonal elements
			   into D */
			i__3 = i__ + nb - 1;
			for (j = i__; j <= i__3; ++j) {
				a_ref (j - 1, j) = e[j - 1];
				d__[j] = a_ref (j, j);
				/* L10: */
			}
			/* L20: */
		}
		/* Use unblocked code to reduce the last or only block */
		NUMlapack_dsytd2 (uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
	} else {
		/* Reduce the lower triangle of A */
		i__2 = *n - nx;
		i__1 = nb;
		for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
			/* Reduce columns i:i+nb-1 to tridiagonal form and form the
			   matrix W which is needed to update the unreduced part of the
			   matrix */
			i__3 = *n - i__ + 1;
			NUMlapack_dlatrd (uplo, &i__3, &nb, &a_ref (i__, i__), lda, &e[i__], &tau[i__], &work[1], &ldwork);
			/* Update the unreduced submatrix A(i+ib:n,i+ib:n), using an
			   update of the form: A := A - V*W' - W*V' */
			i__3 = *n - i__ - nb + 1;
			NUMblas_dsyr2k (uplo, "No transpose", &i__3, &nb, &c_b22, &a_ref (i__ + nb, i__), lda, &work[nb + 1],
				&ldwork, &c_b23, &a_ref (i__ + nb, i__ + nb), lda);
			/* Copy subdiagonal elements back into A, and diagonal elements
			   into D */
			i__3 = i__ + nb - 1;
			for (j = i__; j <= i__3; ++j) {
				a_ref (j + 1, j) = e[j];
				d__[j] = a_ref (j, j);
				/* L30: */
			}
			/* L40: */
		}
		/* Use unblocked code to reduce the last or only block */
		i__1 = *n - i__ + 1;
		NUMlapack_dsytd2 (uplo, &i__1, &a_ref (i__, i__), lda, &d__[i__], &e[i__], &tau[i__], &iinfo);
	}
	work[1] = (double) lwkopt;
	return 0;
} /* NUMlapack_dsytrd */
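/*
	Added usage sketch — illustrative only, with made-up numbers. Reduce a
	symmetric matrix to tridiagonal form: d receives the diagonal, e the
	off-diagonal, and tau the reflector scalars from which a DORGTR-style
	routine can accumulate the orthogonal Q (as NUMlapack_dsyev does above).
*/
static int NUMlapack_example_tridiagonalize (void) {
	double a [9] = { 4.0, 1.0, 2.0,  1.0, 3.0, 1.0,  2.0, 1.0, 5.0 };   /* symmetric, column-major */
	double d [3], e [2], tau [2], work [32];
	integer n = 3, lda = 3, lwork = 32, info;
	NUMlapack_dsytrd ("L", &n, a, &lda, d, e, tau, work, &lwork, &info);
	return (int) info;
}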
  14827. #define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
  14828. #define q_ref(a_1,a_2) q[(a_2)*q_dim1 + a_1]
  14829. #define u_ref(a_1,a_2) u[(a_2)*u_dim1 + a_1]
  14830. #define v_ref(a_1,a_2) v[(a_2)*v_dim1 + a_1]
  14831. int NUMlapack_dtgsja (const char *jobu, const char *jobv, const char *jobq, integer *m, integer *p, integer *n, integer *k, integer *l,
  14832. double *a, integer *lda, double *b, integer *ldb, double *tola, double *tolb, double *alpha, double *beta,
  14833. double *u, integer *ldu, double *v, integer *ldv, double *q, integer *ldq, double *work, integer *ncycle, integer *info) {
  14834. /* Table of constant values */
  14835. static double c_b13 = 0.;
  14836. static double c_b14 = 1.;
  14837. static integer c__1 = 1;
  14838. static double c_b43 = -1.;
  14839. /* System generated locals */
  14840. integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
  14841. i__3, i__4;
  14842. double d__1;
  14843. /* Local variables */
  14844. static integer i__, j;
  14845. static double gamma;
  14846. static double a1;
  14847. static integer initq;
  14848. static double a2, a3, b1;
  14849. static integer initu, initv, wantq, upper;
  14850. static double b2, b3;
  14851. static integer wantu, wantv;
  14852. static double error, ssmin;
  14853. static integer kcycle;
  14854. static double csq, csu, csv, snq, rwk, snu, snv;
  14855. a_dim1 = *lda;
  14856. a_offset = 1 + a_dim1 * 1;
  14857. a -= a_offset;
  14858. b_dim1 = *ldb;
  14859. b_offset = 1 + b_dim1 * 1;
  14860. b -= b_offset;
  14861. --alpha;
  14862. --beta;
  14863. u_dim1 = *ldu;
  14864. u_offset = 1 + u_dim1 * 1;
  14865. u -= u_offset;
  14866. v_dim1 = *ldv;
  14867. v_offset = 1 + v_dim1 * 1;
  14868. v -= v_offset;
  14869. q_dim1 = *ldq;
  14870. q_offset = 1 + q_dim1 * 1;
  14871. q -= q_offset;
  14872. --work;
  14873. /* Function Body */
  14874. initu = lsame_ (jobu, "I");
  14875. wantu = initu || lsame_ (jobu, "U");
  14876. initv = lsame_ (jobv, "I");
  14877. wantv = initv || lsame_ (jobv, "V");
  14878. initq = lsame_ (jobq, "I");
  14879. wantq = initq || lsame_ (jobq, "Q");
  14880. *info = 0;
  14881. if (! (initu || wantu || lsame_ (jobu, "N"))) {
  14882. *info = -1;
  14883. } else if (! (initv || wantv || lsame_ (jobv, "N"))) {
  14884. *info = -2;
  14885. } else if (! (initq || wantq || lsame_ (jobq, "N"))) {
  14886. *info = -3;
  14887. } else if (*m < 0) {
  14888. *info = -4;
  14889. } else if (*p < 0) {
  14890. *info = -5;
  14891. } else if (*n < 0) {
  14892. *info = -6;
  14893. } else if (*lda < MAX (1, *m)) {
  14894. *info = -10;
  14895. } else if (*ldb < MAX (1, *p)) {
  14896. *info = -12;
  14897. } else if (*ldu < 1 || wantu && *ldu < *m) {
  14898. *info = -18;
  14899. } else if (*ldv < 1 || wantv && *ldv < *p) {
  14900. *info = -20;
  14901. } else if (*ldq < 1 || wantq && *ldq < *n) {
  14902. *info = -22;
  14903. }
  14904. if (*info != 0) {
  14905. i__1 = - (*info);
  14906. xerbla_ ("DTGSJA", &i__1);
  14907. return 0;
  14908. }
  14909. /* Initialize U, V and Q, if necessary */
  14910. if (initu) {
  14911. NUMlapack_dlaset ("Full", m, m, &c_b13, &c_b14, &u[u_offset], ldu);
  14912. }
  14913. if (initv) {
  14914. NUMlapack_dlaset ("Full", p, p, &c_b13, &c_b14, &v[v_offset], ldv);
  14915. }
  14916. if (initq) {
  14917. NUMlapack_dlaset ("Full", n, n, &c_b13, &c_b14, &q[q_offset], ldq);
  14918. }
/* Loop until convergence */
upper = FALSE;
for (kcycle = 1; kcycle <= 40; ++kcycle) {
upper = !upper;
i__1 = *l - 1;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = *l;
for (j = i__ + 1; j <= i__2; ++j) {
a1 = 0.;
a2 = 0.;
a3 = 0.;
if (*k + i__ <= *m) {
a1 = a_ref (*k + i__, *n - *l + i__);
}
if (*k + j <= *m) {
a3 = a_ref (*k + j, *n - *l + j);
}
b1 = b_ref (i__, *n - *l + i__);
b3 = b_ref (j, *n - *l + j);
if (upper) {
if (*k + i__ <= *m) {
a2 = a_ref (*k + i__, *n - *l + j);
}
b2 = b_ref (i__, *n - *l + j);
} else {
if (*k + j <= *m) {
a2 = a_ref (*k + j, *n - *l + i__);
}
b2 = b_ref (j, *n - *l + i__);
}
NUMlapack_dlags2 (&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &csv, &snv, &csq, &snq);
/* Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */
if (*k + j <= *m) {
NUMblas_drot (l, &a_ref (*k + j, *n - *l + 1), lda, &a_ref (*k + i__, *n - *l + 1), lda, &csu,
&snu);
}
/* Update I-th and J-th rows of matrix B: V'*B */
NUMblas_drot (l, &b_ref (j, *n - *l + 1), ldb, &b_ref (i__, *n - *l + 1), ldb, &csv, &snv);
/* Update (N-L+I)-th and (N-L+J)-th columns of matrices A and
B: A*Q and B*Q
Computing MIN */
i__4 = *k + *l;
i__3 = MIN (i__4, *m);
NUMblas_drot (&i__3, &a_ref (1, *n - *l + j), &c__1, &a_ref (1, *n - *l + i__), &c__1, &csq, &snq);
NUMblas_drot (l, &b_ref (1, *n - *l + j), &c__1, &b_ref (1, *n - *l + i__), &c__1, &csq, &snq);
if (upper) {
if (*k + i__ <= *m) {
a_ref (*k + i__, *n - *l + j) = 0.;
}
b_ref (i__, *n - *l + j) = 0.;
} else {
if (*k + j <= *m) {
a_ref (*k + j, *n - *l + i__) = 0.;
}
b_ref (j, *n - *l + i__) = 0.;
}
/* Update orthogonal matrices U, V, Q, if desired. */
if (wantu && *k + j <= *m) {
NUMblas_drot (m, &u_ref (1, *k + j), &c__1, &u_ref (1, *k + i__), &c__1, &csu, &snu);
}
if (wantv) {
NUMblas_drot (p, &v_ref (1, j), &c__1, &v_ref (1, i__), &c__1, &csv, &snv);
}
if (wantq) {
NUMblas_drot (n, &q_ref (1, *n - *l + j), &c__1, &q_ref (1, *n - *l + i__), &c__1, &csq, &snq);
}
/* L10: */
}
/* L20: */
}
if (!upper) {
/* The matrices A13 and B13 were lower triangular at the start of
the cycle, and are now upper triangular.
Convergence test: test the parallelism of the corresponding
rows of A and B. */
error = 0.;
/* Computing MIN */
i__2 = *l, i__3 = *m - *k;
i__1 = MIN (i__2, i__3);
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = *l - i__ + 1;
NUMblas_dcopy (&i__2, &a_ref (*k + i__, *n - *l + i__), lda, &work[1], &c__1);
i__2 = *l - i__ + 1;
NUMblas_dcopy (&i__2, &b_ref (i__, *n - *l + i__), ldb, &work[*l + 1], &c__1);
i__2 = *l - i__ + 1;
NUMlapack_dlapll (&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
error = MAX (error, ssmin);
/* L30: */
}
if (fabs (error) <= MIN (*tola, *tolb)) {
goto L50;
}
}
/* End of cycle loop
L40: */
}
/* The algorithm has not converged after MAXIT cycles. */
*info = 1;
goto L100;
L50:
/* If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. Compute
the generalized singular value pairs (ALPHA, BETA), and set the
triangular matrix R to array A. */
i__1 = *k;
for (i__ = 1; i__ <= i__1; ++i__) {
alpha[i__] = 1.;
beta[i__] = 0.;
/* L60: */
}
/* Computing MIN */
i__2 = *l, i__3 = *m - *k;
i__1 = MIN (i__2, i__3);
for (i__ = 1; i__ <= i__1; ++i__) {
a1 = a_ref (*k + i__, *n - *l + i__);
b1 = b_ref (i__, *n - *l + i__);
if (a1 != 0.) {
gamma = b1 / a1;
/* change sign if necessary */
if (gamma < 0.) {
i__2 = *l - i__ + 1;
NUMblas_dscal (&i__2, &c_b43, &b_ref (i__, *n - *l + i__), ldb);
if (wantv) {
NUMblas_dscal (p, &c_b43, &v_ref (1, i__), &c__1);
}
}
d__1 = fabs (gamma);
NUMlapack_dlartg (&d__1, &c_b14, &beta[*k + i__], &alpha[*k + i__], &rwk);
if (alpha[*k + i__] >= beta[*k + i__]) {
i__2 = *l - i__ + 1;
d__1 = 1. / alpha[*k + i__];
NUMblas_dscal (&i__2, &d__1, &a_ref (*k + i__, *n - *l + i__), lda);
} else {
i__2 = *l - i__ + 1;
d__1 = 1. / beta[*k + i__];
NUMblas_dscal (&i__2, &d__1, &b_ref (i__, *n - *l + i__), ldb);
i__2 = *l - i__ + 1;
NUMblas_dcopy (&i__2, &b_ref (i__, *n - *l + i__), ldb, &a_ref (*k + i__, *n - *l + i__), lda);
}
} else {
alpha[*k + i__] = 0.;
beta[*k + i__] = 1.;
i__2 = *l - i__ + 1;
NUMblas_dcopy (&i__2, &b_ref (i__, *n - *l + i__), ldb, &a_ref (*k + i__, *n - *l + i__), lda);
}
/* L70: */
}
/* Post-assignment */
i__1 = *k + *l;
for (i__ = *m + 1; i__ <= i__1; ++i__) {
alpha[i__] = 0.;
beta[i__] = 1.;
/* L80: */
}
if (*k + *l < *n) {
i__1 = *n;
for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
alpha[i__] = 0.;
beta[i__] = 0.;
/* L90: */
}
}
L100:
*ncycle = kcycle;
return 0;
} /* NUMlapack_dtgsja */
#undef v_ref
#undef u_ref
#undef q_ref
#undef b_ref
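/* NUMlapack_dtrevc: translation of the reference LAPACK routine DTREVC.
   Computes some or all of the right and/or left eigenvectors of a real upper
   quasi-triangular matrix T (Schur form), by back substitution on the
   quasi-triangular system; with HOWMNY = "B" the computed vectors are
   back-transformed with the input Schur vectors and overwritten on VL/VR. */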
int NUMlapack_dtrevc (const char *side, const char *howmny, int *select, integer *n, double *t, integer *ldt, double *vl,
integer *ldvl, double *vr, integer *ldvr, integer *mm, integer *m, double *work, integer *info) {
/* Table of constant values */
static int c_false = FALSE;
static integer c__1 = 1;
static double c_b22 = 1.;
static double c_b25 = 0.;
static integer c__2 = 2;
static int c_true = TRUE;
/* System generated locals */
integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, i__2, i__3;
double d__1, d__2, d__3, d__4, d__5, d__6;
/* Local variables */
static double beta, emax;
static int pair;
static int allv;
static integer ierr;
static double unfl, ovfl, smin;
static int over;
static double vmax;
static integer jnxt, i__, j, k;
static double scale, x[4] /* was [2][2] */ ;
static double remax;
static int leftv, bothv;
static double vcrit;
static int somev;
static integer j1, j2, n2;
static double xnorm;
static integer ii, ki;
static integer ip, is;
static double wi;
static double wr;
static double bignum;
static int rightv;
static double smlnum, rec, ulp;
#define t_ref(a_1,a_2) t[(a_2)*t_dim1 + a_1]
#define x_ref(a_1,a_2) x[(a_2)*2 + a_1 - 3]
#define vl_ref(a_1,a_2) vl[(a_2)*vl_dim1 + a_1]
#define vr_ref(a_1,a_2) vr[(a_2)*vr_dim1 + a_1]
--select;
t_dim1 = *ldt;
t_offset = 1 + t_dim1 * 1;
t -= t_offset;
vl_dim1 = *ldvl;
vl_offset = 1 + vl_dim1 * 1;
vl -= vl_offset;
vr_dim1 = *ldvr;
vr_offset = 1 + vr_dim1 * 1;
vr -= vr_offset;
--work;
/* Function Body */
bothv = lsame_ (side, "B");
rightv = lsame_ (side, "R") || bothv;
leftv = lsame_ (side, "L") || bothv;
allv = lsame_ (howmny, "A");
over = lsame_ (howmny, "B");
somev = lsame_ (howmny, "S");
*info = 0;
if (!rightv && !leftv) {
*info = -1;
} else if (!allv && !over && !somev) {
*info = -2;
} else if (*n < 0) {
*info = -4;
} else if (*ldt < MAX (1, *n)) {
*info = -6;
} else if (*ldvl < 1 || leftv && *ldvl < *n) {
*info = -8;
} else if (*ldvr < 1 || rightv && *ldvr < *n) {
*info = -10;
} else {
/* Set M to the number of columns required to store the selected
eigenvectors, standardize the array SELECT if necessary, and test
MM. */
if (somev) {
*m = 0;
pair = FALSE;
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
if (pair) {
pair = FALSE;
select[j] = FALSE;
} else {
if (j < *n) {
if (t_ref (j + 1, j) == 0.) {
if (select[j]) {
++ (*m);
}
} else {
pair = TRUE;
if (select[j] || select[j + 1]) {
select[j] = TRUE;
*m += 2;
}
}
} else {
if (select[*n]) {
++ (*m);
}
}
}
/* L10: */
}
} else {
*m = *n;
}
if (*mm < *m) {
*info = -11;
}
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("NUMlapack_dtrevc", &i__1);
return 0;
}
/* Quick return if possible. */
if (*n == 0) {
return 0;
}
/* Set the constants to control overflow. */
unfl = NUMblas_dlamch ("Safe minimum");
ovfl = 1. / unfl;
NUMlapack_dlabad (&unfl, &ovfl);
ulp = NUMblas_dlamch ("Precision");
smlnum = unfl * (*n / ulp);
bignum = (1. - ulp) / smlnum;
/* Compute 1-norm of each column of strictly upper triangular part of T
to control overflow in triangular solver. */
work[1] = 0.;
i__1 = *n;
for (j = 2; j <= i__1; ++j) {
work[j] = 0.;
i__2 = j - 1;
for (i__ = 1; i__ <= i__2; ++i__) {
work[j] += (d__1 = t_ref (i__, j), fabs (d__1));
/* L20: */
}
/* L30: */
}
/* Index IP is used to specify the real or complex eigenvalue:
IP =  0: real eigenvalue,
      1: first of conjugate complex pair (wr,wi),
     -1: second of conjugate complex pair (wr,wi). */
n2 = *n << 1;
if (rightv) {
/* Compute right eigenvectors. */
ip = 0;
is = *m;
for (ki = *n; ki >= 1; --ki) {
if (ip == 1) {
goto L130;
}
if (ki == 1) {
goto L40;
}
if (t_ref (ki, ki - 1) == 0.) {
goto L40;
}
ip = -1;
L40:
if (somev) {
if (ip == 0) {
if (!select[ki]) {
goto L130;
}
} else {
if (!select[ki - 1]) {
goto L130;
}
}
}
/* Compute the KI-th eigenvalue (WR,WI). */
wr = t_ref (ki, ki);
wi = 0.;
if (ip != 0) {
wi = sqrt ( (d__1 = t_ref (ki, ki - 1), fabs (d__1))) * sqrt ( (d__2 =
t_ref (ki - 1, ki), fabs (d__2)));
}
/* Computing MAX */
d__1 = ulp * (fabs (wr) + fabs (wi));
smin = MAX (d__1, smlnum);
if (ip == 0) {
/* Real right eigenvector */
work[ki + *n] = 1.;
/* Form right-hand side */
i__1 = ki - 1;
for (k = 1; k <= i__1; ++k) {
work[k + *n] = -t_ref (k, ki);
/* L50: */
}
/* Solve the upper quasi-triangular system: (T(1:KI-1,1:KI-1)
- WR)*X = SCALE*WORK. */
jnxt = ki - 1;
for (j = ki - 1; j >= 1; --j) {
if (j > jnxt) {
goto L60;
}
j1 = j;
j2 = j;
jnxt = j - 1;
if (j > 1) {
if (t_ref (j, j - 1) != 0.) {
j1 = j - 1;
jnxt = j - 2;
}
}
if (j1 == j2) {
/* 1-by-1 diagonal block */
NUMlapack_dlaln2 (&c_false, &c__1, &c__1, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm, &ierr);
/* Scale X(1,1) to avoid overflow when updating the
right-hand side. */
if (xnorm > 1.) {
if (work[j] > bignum / xnorm) {
x_ref (1, 1) = x_ref (1, 1) / xnorm;
scale /= xnorm;
}
}
/* Scale if necessary */
if (scale != 1.) {
NUMblas_dscal (&ki, &scale, &work[*n + 1], &c__1);
}
work[j + *n] = x_ref (1, 1);
/* Update right-hand side */
i__1 = j - 1;
d__1 = -x_ref (1, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[*n + 1], &c__1);
} else {
/* 2-by-2 diagonal block */
NUMlapack_dlaln2 (&c_false, &c__2, &c__1, &smin, &c_b22, &t_ref (j - 1, j - 1), ldt,
&c_b22, &c_b22, &work[j - 1 + *n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm,
&ierr);
/* Scale X(1,1) and X(2,1) to avoid overflow when
updating the right-hand side. */
if (xnorm > 1.) {
/* Computing MAX */
d__1 = work[j - 1], d__2 = work[j];
beta = MAX (d__1, d__2);
if (beta > bignum / xnorm) {
x_ref (1, 1) = x_ref (1, 1) / xnorm;
x_ref (2, 1) = x_ref (2, 1) / xnorm;
scale /= xnorm;
}
}
/* Scale if necessary */
if (scale != 1.) {
NUMblas_dscal (&ki, &scale, &work[*n + 1], &c__1);
}
work[j - 1 + *n] = x_ref (1, 1);
work[j + *n] = x_ref (2, 1);
/* Update right-hand side */
i__1 = j - 2;
d__1 = -x_ref (1, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j - 1), &c__1, &work[*n + 1], &c__1);
i__1 = j - 2;
d__1 = -x_ref (2, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[*n + 1], &c__1);
}
L60:
;
}
/* Copy the vector x or Q*x to VR and normalize. */
if (!over) {
NUMblas_dcopy (&ki, &work[*n + 1], &c__1, &vr_ref (1, is), &c__1);
ii = NUMblas_idamax (&ki, &vr_ref (1, is), &c__1);
remax = 1. / (d__1 = vr_ref (ii, is), fabs (d__1));
NUMblas_dscal (&ki, &remax, &vr_ref (1, is), &c__1);
i__1 = *n;
for (k = ki + 1; k <= i__1; ++k) {
vr_ref (k, is) = 0.;
/* L70: */
}
} else {
if (ki > 1) {
i__1 = ki - 1;
NUMblas_dgemv ("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &work[*n + 1], &c__1,
&work[ki + *n], &vr_ref (1, ki), &c__1);
}
ii = NUMblas_idamax (n, &vr_ref (1, ki), &c__1);
remax = 1. / (d__1 = vr_ref (ii, ki), fabs (d__1));
NUMblas_dscal (n, &remax, &vr_ref (1, ki), &c__1);
}
} else {
/* Complex right eigenvector.
Initial solve:
[ ( T(KI-1,KI-1)  T(KI-1,KI) ) - (WR + i*WI) ] * X = 0.
[ ( T(KI,KI-1)    T(KI,KI)   )               ]
*/
if ( (d__1 = t_ref (ki - 1, ki), fabs (d__1)) >= (d__2 = t_ref (ki, ki - 1), fabs (d__2))) {
work[ki - 1 + *n] = 1.;
work[ki + n2] = wi / t_ref (ki - 1, ki);
} else {
work[ki - 1 + *n] = -wi / t_ref (ki, ki - 1);
work[ki + n2] = 1.;
}
work[ki + *n] = 0.;
work[ki - 1 + n2] = 0.;
/* Form right-hand side */
i__1 = ki - 2;
for (k = 1; k <= i__1; ++k) {
work[k + *n] = -work[ki - 1 + *n] * t_ref (k, ki - 1);
work[k + n2] = -work[ki + n2] * t_ref (k, ki);
/* L80: */
}
/* Solve upper quasi-triangular system: (T(1:KI-2,1:KI-2) -
(WR+i*WI))*X = SCALE*(WORK+i*WORK2) */
jnxt = ki - 2;
for (j = ki - 2; j >= 1; --j) {
if (j > jnxt) {
goto L90;
}
j1 = j;
j2 = j;
jnxt = j - 1;
if (j > 1) {
if (t_ref (j, j - 1) != 0.) {
j1 = j - 1;
jnxt = j - 2;
}
}
if (j1 == j2) {
/* 1-by-1 diagonal block */
NUMlapack_dlaln2 (&c_false, &c__1, &c__2, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &ierr);
/* Scale X(1,1) and X(1,2) to avoid overflow when
updating the right-hand side. */
if (xnorm > 1.) {
if (work[j] > bignum / xnorm) {
x_ref (1, 1) = x_ref (1, 1) / xnorm;
x_ref (1, 2) = x_ref (1, 2) / xnorm;
scale /= xnorm;
}
}
/* Scale if necessary */
if (scale != 1.) {
NUMblas_dscal (&ki, &scale, &work[*n + 1], &c__1);
NUMblas_dscal (&ki, &scale, &work[n2 + 1], &c__1);
}
work[j + *n] = x_ref (1, 1);
work[j + n2] = x_ref (1, 2);
/* Update the right-hand side */
i__1 = j - 1;
d__1 = -x_ref (1, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[*n + 1], &c__1);
i__1 = j - 1;
d__1 = -x_ref (1, 2);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[n2 + 1], &c__1);
} else {
/* 2-by-2 diagonal block */
NUMlapack_dlaln2 (&c_false, &c__2, &c__2, &smin, &c_b22, &t_ref (j - 1, j - 1), ldt,
&c_b22, &c_b22, &work[j - 1 + *n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &ierr);
/* Scale X to avoid overflow when updating the
right-hand side. */
if (xnorm > 1.) {
/* Computing MAX */
d__1 = work[j - 1], d__2 = work[j];
beta = MAX (d__1, d__2);
if (beta > bignum / xnorm) {
rec = 1. / xnorm;
x_ref (1, 1) = x_ref (1, 1) * rec;
x_ref (1, 2) = x_ref (1, 2) * rec;
x_ref (2, 1) = x_ref (2, 1) * rec;
x_ref (2, 2) = x_ref (2, 2) * rec;
scale *= rec;
}
}
/* Scale if necessary */
if (scale != 1.) {
NUMblas_dscal (&ki, &scale, &work[*n + 1], &c__1);
NUMblas_dscal (&ki, &scale, &work[n2 + 1], &c__1);
}
work[j - 1 + *n] = x_ref (1, 1);
work[j + *n] = x_ref (2, 1);
work[j - 1 + n2] = x_ref (1, 2);
work[j + n2] = x_ref (2, 2);
/* Update the right-hand side */
i__1 = j - 2;
d__1 = -x_ref (1, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j - 1), &c__1, &work[*n + 1], &c__1);
i__1 = j - 2;
d__1 = -x_ref (2, 1);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[*n + 1], &c__1);
i__1 = j - 2;
d__1 = -x_ref (1, 2);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j - 1), &c__1, &work[n2 + 1], &c__1);
i__1 = j - 2;
d__1 = -x_ref (2, 2);
NUMblas_daxpy (&i__1, &d__1, &t_ref (1, j), &c__1, &work[n2 + 1], &c__1);
}
L90:
;
}
/* Copy the vector x or Q*x to VR and normalize. */
if (!over) {
NUMblas_dcopy (&ki, &work[*n + 1], &c__1, &vr_ref (1, is - 1), &c__1);
NUMblas_dcopy (&ki, &work[n2 + 1], &c__1, &vr_ref (1, is), &c__1);
emax = 0.;
i__1 = ki;
for (k = 1; k <= i__1; ++k) {
/* Computing MAX */
d__3 = emax, d__4 = (d__1 = vr_ref (k, is - 1), fabs (d__1)) + (d__2 =
vr_ref (k, is), fabs (d__2));
emax = MAX (d__3, d__4);
/* L100: */
}
remax = 1. / emax;
NUMblas_dscal (&ki, &remax, &vr_ref (1, is - 1), &c__1);
NUMblas_dscal (&ki, &remax, &vr_ref (1, is), &c__1);
i__1 = *n;
for (k = ki + 1; k <= i__1; ++k) {
vr_ref (k, is - 1) = 0.;
vr_ref (k, is) = 0.;
/* L110: */
}
} else {
if (ki > 2) {
i__1 = ki - 2;
NUMblas_dgemv ("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &work[*n + 1], &c__1,
&work[ki - 1 + *n], &vr_ref (1, ki - 1), &c__1);
i__1 = ki - 2;
NUMblas_dgemv ("N", n, &i__1, &c_b22, &vr[vr_offset], ldvr, &work[n2 + 1], &c__1,
&work[ki + n2], &vr_ref (1, ki), &c__1);
} else {
NUMblas_dscal (n, &work[ki - 1 + *n], &vr_ref (1, ki - 1), &c__1);
NUMblas_dscal (n, &work[ki + n2], &vr_ref (1, ki), &c__1);
}
emax = 0.;
i__1 = *n;
for (k = 1; k <= i__1; ++k) {
/* Computing MAX */
d__3 = emax, d__4 = (d__1 = vr_ref (k, ki - 1), fabs (d__1)) + (d__2 =
vr_ref (k, ki), fabs (d__2));
emax = MAX (d__3, d__4);
/* L120: */
}
remax = 1. / emax;
NUMblas_dscal (n, &remax, &vr_ref (1, ki - 1), &c__1);
NUMblas_dscal (n, &remax, &vr_ref (1, ki), &c__1);
}
}
--is;
if (ip != 0) {
--is;
}
L130:
if (ip == 1) {
ip = 0;
}
if (ip == -1) {
ip = 1;
}
/* L140: */
}
}
if (leftv) {
/* Compute left eigenvectors. */
ip = 0;
is = 1;
i__1 = *n;
for (ki = 1; ki <= i__1; ++ki) {
if (ip == -1) {
goto L250;
}
if (ki == *n) {
goto L150;
}
if (t_ref (ki + 1, ki) == 0.) {
goto L150;
}
ip = 1;
L150:
if (somev) {
if (!select[ki]) {
goto L250;
}
}
/* Compute the KI-th eigenvalue (WR,WI). */
wr = t_ref (ki, ki);
wi = 0.;
if (ip != 0) {
wi = sqrt ( (d__1 = t_ref (ki, ki + 1), fabs (d__1))) * sqrt ( (d__2 =
t_ref (ki + 1, ki), fabs (d__2)));
}
/* Computing MAX */
d__1 = ulp * (fabs (wr) + fabs (wi));
smin = MAX (d__1, smlnum);
if (ip == 0) {
/* Real left eigenvector. */
work[ki + *n] = 1.;
/* Form right-hand side */
i__2 = *n;
for (k = ki + 1; k <= i__2; ++k) {
work[k + *n] = -t_ref (ki, k);
/* L160: */
}
/* Solve the quasi-triangular system: (T(KI+1:N,KI+1:N) -
WR)'*X = SCALE*WORK */
vmax = 1.;
vcrit = bignum;
jnxt = ki + 1;
i__2 = *n;
for (j = ki + 1; j <= i__2; ++j) {
if (j < jnxt) {
goto L170;
}
j1 = j;
j2 = j;
jnxt = j + 1;
if (j < *n) {
if (t_ref (j + 1, j) != 0.) {
j2 = j + 1;
jnxt = j + 2;
}
}
if (j1 == j2) {
/* 1-by-1 diagonal block
Scale if necessary to avoid overflow when forming
the right-hand side. */
if (work[j] > vcrit) {
rec = 1. / vmax;
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + *n], &c__1);
vmax = 1.;
vcrit = bignum;
}
i__3 = j - ki - 1;
work[j + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 1, j), &c__1, &work[ki + 1 + *n], &c__1);
/* Solve (T(J,J)-WR)'*X = WORK */
NUMlapack_dlaln2 (&c_false, &c__1, &c__1, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm, &ierr);
/* Scale if necessary */
if (scale != 1.) {
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + *n], &c__1);
}
work[j + *n] = x_ref (1, 1);
/* Computing MAX */
d__2 = (d__1 = work[j + *n], fabs (d__1));
vmax = MAX (d__2, vmax);
vcrit = bignum / vmax;
} else {
/* 2-by-2 diagonal block
Scale if necessary to avoid overflow when forming
the right-hand side.
Computing MAX */
d__1 = work[j], d__2 = work[j + 1];
beta = MAX (d__1, d__2);
if (beta > vcrit) {
rec = 1. / vmax;
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + *n], &c__1);
vmax = 1.;
vcrit = bignum;
}
i__3 = j - ki - 1;
work[j + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 1, j), &c__1, &work[ki + 1 + *n], &c__1);
i__3 = j - ki - 1;
work[j + 1 + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 1, j + 1), &c__1, &work[ki + 1 + *n], &c__1);
/* Solve
[ T(J,J)-WR   T(J,J+1)      ]' * X = SCALE*( WORK1 )
[ T(J+1,J)    T(J+1,J+1)-WR ]              ( WORK2 ) */
NUMlapack_dlaln2 (&c_true, &c__2, &c__1, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &c_b25, x, &c__2, &scale, &xnorm, &ierr);
/* Scale if necessary */
if (scale != 1.) {
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + *n], &c__1);
}
work[j + *n] = x_ref (1, 1);
work[j + 1 + *n] = x_ref (2, 1);
/* Computing MAX */
d__3 = (d__1 = work[j + *n], fabs (d__1)), d__4 = (d__2 =
work[j + 1 + *n], fabs (d__2)), d__3 = MAX (d__3, d__4);
vmax = MAX (d__3, vmax);
vcrit = bignum / vmax;
}
L170:
;
}
/* Copy the vector x or Q*x to VL and normalize. */
if (!over) {
i__2 = *n - ki + 1;
NUMblas_dcopy (&i__2, &work[ki + *n], &c__1, &vl_ref (ki, is), &c__1);
i__2 = *n - ki + 1;
ii = NUMblas_idamax (&i__2, &vl_ref (ki, is), &c__1) + ki - 1;
remax = 1. / (d__1 = vl_ref (ii, is), fabs (d__1));
i__2 = *n - ki + 1;
NUMblas_dscal (&i__2, &remax, &vl_ref (ki, is), &c__1);
i__2 = ki - 1;
for (k = 1; k <= i__2; ++k) {
vl_ref (k, is) = 0.;
/* L180: */
}
} else {
if (ki < *n) {
i__2 = *n - ki;
NUMblas_dgemv ("N", n, &i__2, &c_b22, &vl_ref (1, ki + 1), ldvl, &work[ki + 1 + *n],
&c__1, &work[ki + *n], &vl_ref (1, ki), &c__1);
}
ii = NUMblas_idamax (n, &vl_ref (1, ki), &c__1);
remax = 1. / (d__1 = vl_ref (ii, ki), fabs (d__1));
NUMblas_dscal (n, &remax, &vl_ref (1, ki), &c__1);
}
} else {
/* Complex left eigenvector.
Initial solve:
[ ( T(KI,KI)    T(KI,KI+1)   )' - (WR - i*WI) ] * X = 0.
[ ( T(KI+1,KI)  T(KI+1,KI+1) )                ]
*/
if ( (d__1 = t_ref (ki, ki + 1), fabs (d__1)) >= (d__2 = t_ref (ki + 1, ki), fabs (d__2))) {
work[ki + *n] = wi / t_ref (ki, ki + 1);
work[ki + 1 + n2] = 1.;
} else {
work[ki + *n] = 1.;
work[ki + 1 + n2] = -wi / t_ref (ki + 1, ki);
}
work[ki + 1 + *n] = 0.;
work[ki + n2] = 0.;
/* Form right-hand side */
i__2 = *n;
for (k = ki + 2; k <= i__2; ++k) {
work[k + *n] = -work[ki + *n] * t_ref (ki, k);
work[k + n2] = -work[ki + 1 + n2] * t_ref (ki + 1, k);
/* L190: */
}
/* Solve complex quasi-triangular system:
(T(KI+2:N,KI+2:N) - (WR-i*WI))*X = WORK1+i*WORK2 */
vmax = 1.;
vcrit = bignum;
jnxt = ki + 2;
i__2 = *n;
for (j = ki + 2; j <= i__2; ++j) {
if (j < jnxt) {
goto L200;
}
j1 = j;
j2 = j;
jnxt = j + 1;
if (j < *n) {
if (t_ref (j + 1, j) != 0.) {
j2 = j + 1;
jnxt = j + 2;
}
}
if (j1 == j2) {
/* 1-by-1 diagonal block
Scale if necessary to avoid overflow when forming
the right-hand side elements. */
if (work[j] > vcrit) {
rec = 1. / vmax;
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + *n], &c__1);
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + n2], &c__1);
vmax = 1.;
vcrit = bignum;
}
i__3 = j - ki - 2;
work[j + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j), &c__1, &work[ki + 2 + *n], &c__1);
i__3 = j - ki - 2;
work[j + n2] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j), &c__1, &work[ki + 2 + n2], &c__1);
/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
d__1 = -wi;
NUMlapack_dlaln2 (&c_false, &c__1, &c__2, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &ierr);
/* Scale if necessary */
if (scale != 1.) {
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + *n], &c__1);
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + n2], &c__1);
}
work[j + *n] = x_ref (1, 1);
work[j + n2] = x_ref (1, 2);
/* Computing MAX */
d__3 = (d__1 = work[j + *n], fabs (d__1)), d__4 = (d__2 =
work[j + n2], fabs (d__2)), d__3 = MAX (d__3, d__4);
vmax = MAX (d__3, vmax);
vcrit = bignum / vmax;
} else {
/* 2-by-2 diagonal block
Scale if necessary to avoid overflow when forming
the right-hand side elements.
Computing MAX */
d__1 = work[j], d__2 = work[j + 1];
beta = MAX (d__1, d__2);
if (beta > vcrit) {
rec = 1. / vmax;
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + *n], &c__1);
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &rec, &work[ki + n2], &c__1);
vmax = 1.;
vcrit = bignum;
}
i__3 = j - ki - 2;
work[j + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j), &c__1, &work[ki + 2 + *n], &c__1);
i__3 = j - ki - 2;
work[j + n2] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j), &c__1, &work[ki + 2 + n2], &c__1);
i__3 = j - ki - 2;
work[j + 1 + *n] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j + 1), &c__1, &work[ki + 2 + *n], &c__1);
i__3 = j - ki - 2;
work[j + 1 + n2] -=
NUMblas_ddot (&i__3, &t_ref (ki + 2, j + 1), &c__1, &work[ki + 2 + n2], &c__1);
/* Solve 2-by-2 complex linear equation
[ ( T(j,j)    T(j,j+1)   )' - (wr-i*wi)*I ] * X = SCALE*B
[ ( T(j+1,j)  T(j+1,j+1) )                ]
*/
d__1 = -wi;
NUMlapack_dlaln2 (&c_true, &c__2, &c__2, &smin, &c_b22, &t_ref (j, j), ldt, &c_b22,
&c_b22, &work[j + *n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &ierr);
/* Scale if necessary */
if (scale != 1.) {
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + *n], &c__1);
i__3 = *n - ki + 1;
NUMblas_dscal (&i__3, &scale, &work[ki + n2], &c__1);
}
work[j + *n] = x_ref (1, 1);
work[j + n2] = x_ref (1, 2);
work[j + 1 + *n] = x_ref (2, 1);
work[j + 1 + n2] = x_ref (2, 2);
/* Computing MAX */
d__5 = (d__1 = x_ref (1, 1), fabs (d__1)), d__6 = (d__2 =
x_ref (1, 2), fabs (d__2)), d__5 = MAX (d__5, d__6), d__6 = (d__3 =
x_ref (2, 1), fabs (d__3)), d__5 = MAX (d__5, d__6), d__6 = (d__4 =
x_ref (2, 2), fabs (d__4)), d__5 = MAX (d__5, d__6);
vmax = MAX (d__5, vmax);
vcrit = bignum / vmax;
}
L200:
;
}
/* Copy the vector x or Q*x to VL and normalize.
L210: */
if (!over) {
i__2 = *n - ki + 1;
NUMblas_dcopy (&i__2, &work[ki + *n], &c__1, &vl_ref (ki, is), &c__1);
i__2 = *n - ki + 1;
NUMblas_dcopy (&i__2, &work[ki + n2], &c__1, &vl_ref (ki, is + 1), &c__1);
emax = 0.;
i__2 = *n;
for (k = ki; k <= i__2; ++k) {
/* Computing MAX */
d__3 = emax, d__4 = (d__1 = vl_ref (k, is), fabs (d__1)) + (d__2 =
vl_ref (k, is + 1), fabs (d__2));
emax = MAX (d__3, d__4);
/* L220: */
}
remax = 1. / emax;
i__2 = *n - ki + 1;
NUMblas_dscal (&i__2, &remax, &vl_ref (ki, is), &c__1);
i__2 = *n - ki + 1;
NUMblas_dscal (&i__2, &remax, &vl_ref (ki, is + 1), &c__1);
i__2 = ki - 1;
for (k = 1; k <= i__2; ++k) {
vl_ref (k, is) = 0.;
vl_ref (k, is + 1) = 0.;
/* L230: */
}
} else {
if (ki < *n - 1) {
i__2 = *n - ki - 1;
NUMblas_dgemv ("N", n, &i__2, &c_b22, &vl_ref (1, ki + 2), ldvl, &work[ki + 2 + *n],
&c__1, &work[ki + *n], &vl_ref (1, ki), &c__1);
i__2 = *n - ki - 1;
NUMblas_dgemv ("N", n, &i__2, &c_b22, &vl_ref (1, ki + 2), ldvl, &work[ki + 2 + n2],
&c__1, &work[ki + 1 + n2], &vl_ref (1, ki + 1), &c__1);
} else {
NUMblas_dscal (n, &work[ki + *n], &vl_ref (1, ki), &c__1);
NUMblas_dscal (n, &work[ki + 1 + n2], &vl_ref (1, ki + 1), &c__1);
}
emax = 0.;
i__2 = *n;
for (k = 1; k <= i__2; ++k) {
/* Computing MAX */
d__3 = emax, d__4 = (d__1 = vl_ref (k, ki), fabs (d__1)) + (d__2 =
vl_ref (k, ki + 1), fabs (d__2));
emax = MAX (d__3, d__4);
/* L240: */
}
remax = 1. / emax;
NUMblas_dscal (n, &remax, &vl_ref (1, ki), &c__1);
NUMblas_dscal (n, &remax, &vl_ref (1, ki + 1), &c__1);
}
}
++is;
if (ip != 0) {
++is;
}
L250:
if (ip == -1) {
ip = 0;
}
if (ip == 1) {
ip = -1;
}
/* L260: */
}
}
return 0;
} /* NUMlapack_dtrevc */
#undef vr_ref
#undef vl_ref
#undef x_ref
#undef t_ref
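/* NUMlapack_dtrti2: translation of the reference LAPACK routine DTRTI2.
   Computes the inverse of a real upper or lower triangular matrix in place,
   using the unblocked column-by-column algorithm (one NUMblas_dtrmv and one
   NUMblas_dscal per column). */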
int NUMlapack_dtrti2 (const char *uplo, const char *diag, integer *n, double *a, integer *lda, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
/* System generated locals */
integer a_dim1, a_offset, i__1, i__2;
/* Local variables */
static integer j;
static integer upper;
static integer nounit;
static double ajj;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
/* Function Body */
*info = 0;
upper = lsame_ (uplo, "U");
nounit = lsame_ (diag, "N");
if (!upper && !lsame_ (uplo, "L")) {
*info = -1;
} else if (!nounit && !lsame_ (diag, "U")) {
*info = -2;
} else if (*n < 0) {
*info = -3;
} else if (*lda < MAX (1, *n)) {
*info = -5;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DTRTI2", &i__1);
return 0;
}
if (upper) {
/* Compute inverse of upper triangular matrix. */
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
if (nounit) {
a_ref (j, j) = 1. / a_ref (j, j);
ajj = -a_ref (j, j);
} else {
ajj = -1.;
}
/* Compute elements 1:j-1 of j-th column. */
i__2 = j - 1;
NUMblas_dtrmv ("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &a_ref (1, j), &c__1);
i__2 = j - 1;
NUMblas_dscal (&i__2, &ajj, &a_ref (1, j), &c__1);
/* L10: */
}
} else {
/* Compute inverse of lower triangular matrix. */
for (j = *n; j >= 1; --j) {
if (nounit) {
a_ref (j, j) = 1. / a_ref (j, j);
ajj = -a_ref (j, j);
} else {
ajj = -1.;
}
if (j < *n) {
/* Compute elements j+1:n of j-th column. */
i__1 = *n - j;
NUMblas_dtrmv ("Lower", "No transpose", diag, &i__1, &a_ref (j + 1, j + 1), lda, &a_ref (j + 1, j),
&c__1);
i__1 = *n - j;
NUMblas_dscal (&i__1, &ajj, &a_ref (j + 1, j), &c__1);
}
/* L20: */
}
}
return 0;
} /* NUMlapack_dtrti2 */
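/* NUMlapack_dtrtri: translation of the reference LAPACK routine DTRTRI.
   Computes the inverse of a real upper or lower triangular matrix in place.
   For a non-unit triangular matrix it returns early with *info = j when the
   j-th diagonal element is exactly zero (singular matrix). When no blocking
   is advised by NUMlapack_ilaenv it falls back on the unblocked
   NUMlapack_dtrti2; otherwise it inverts block columns with NUMblas_dtrmm
   and NUMblas_dtrsm. */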
int NUMlapack_dtrtri (const char *uplo, const char *diag, integer *n, double *a, integer *lda, integer *info) {
/* Table of constant values */
static integer c__1 = 1;
static integer c_n1 = -1;
static integer c__2 = 2;
static double c_b18 = 1.;
static double c_b22 = -1.;
/* System generated locals */
char *a__1[2];
integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
char ch__1[2];
/* Local variables */
static integer j;
static integer upper;
static integer jb, nb, nn;
static integer nounit;
a_dim1 = *lda;
a_offset = 1 + a_dim1 * 1;
a -= a_offset;
/* Function Body */
*info = 0;
upper = lsame_ (uplo, "U");
nounit = lsame_ (diag, "N");
if (!upper && !lsame_ (uplo, "L")) {
*info = -1;
} else if (!nounit && !lsame_ (diag, "U")) {
*info = -2;
} else if (*n < 0) {
*info = -3;
} else if (*lda < MAX (1, *n)) {
*info = -5;
}
if (*info != 0) {
i__1 = - (*info);
xerbla_ ("DTRTRI", &i__1);
return 0;
}
/* Quick return if possible */
if (*n == 0) {
return 0;
}
/* Check for singularity if non-unit. */
if (nounit) {
i__1 = *n;
for (*info = 1; *info <= i__1; ++ (*info)) {
if (a_ref (*info, *info) == 0.) {
return 0;
}
/* L10: */
}
*info = 0;
}
/* Determine the block size for this environment.
Writing concatenation */
i__2[0] = 1, a__1[0] = (char *) uplo;
i__2[1] = 1, a__1[1] = (char *) diag;
s_cat (ch__1, (const char **) a__1, i__2, &c__2, 2);
nb = NUMlapack_ilaenv (&c__1, "DTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1, 6, 2);
if (nb <= 1 || nb >= *n) {
/* Use unblocked code */
NUMlapack_dtrti2 (uplo, diag, n, &a[a_offset], lda, info);
} else {
/* Use blocked code */
if (upper) {
/* Compute inverse of upper triangular matrix */
i__1 = *n;
i__3 = nb;
for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
/* Computing MIN */
i__4 = nb, i__5 = *n - j + 1;
jb = MIN (i__4, i__5);
/* Compute rows 1:j-1 of current block column */
i__4 = j - 1;
NUMblas_dtrmm ("Left", "Upper", "No transpose", diag, &i__4, &jb, &c_b18, &a[a_offset], lda,
&a_ref (1, j), lda);
i__4 = j - 1;
NUMblas_dtrsm ("Right", "Upper", "No transpose", diag, &i__4, &jb, &c_b22, &a_ref (j, j), lda,
&a_ref (1, j), lda);
/* Compute inverse of current diagonal block */
NUMlapack_dtrti2 ("Upper", diag, &jb, &a_ref (j, j), lda, info);
/* L20: */
}
} else {
/* Compute inverse of lower triangular matrix */
nn = (*n - 1) / nb * nb + 1;
i__3 = -nb;
for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
/* Computing MIN */
i__1 = nb, i__4 = *n - j + 1;
jb = MIN (i__1, i__4);
if (j + jb <= *n) {
/* Compute rows j+jb:n of current block column */
i__1 = *n - j - jb + 1;
NUMblas_dtrmm ("Left", "Lower", "No transpose", diag, &i__1, &jb, &c_b18, &a_ref (j + jb,
j + jb), lda, &a_ref (j + jb, j), lda);
i__1 = *n - j - jb + 1;
NUMblas_dtrsm ("Right", "Lower", "No transpose", diag, &i__1, &jb, &c_b22, &a_ref (j, j), lda,
&a_ref (j + jb, j), lda);
}
/* Compute inverse of current diagonal block */
NUMlapack_dtrti2 ("Lower", diag, &jb, &a_ref (j, j), lda, info);
/* L30: */
}
}
}
return 0;
} /* NUMlapack_dtrtri */
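/* NUMlapack_ieeeck: translation of the reference LAPACK routine IEEECK.
   Verifies at run time that infinity arithmetic and (for *ispec == 1) NaN
   arithmetic behave as IEEE 754 requires, i.e. that they propagate through
   arithmetic without trapping; returns 1 when the checks pass, 0 otherwise. */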
integer NUMlapack_ieeeck (integer *ispec, float *zero, float *one) {
/* System generated locals */
integer ret_val;
/* Local variables */
static float neginf, posinf, negzro, newzro, nan1, nan2, nan3, nan4, nan5, nan6;
ret_val = 1;
posinf = *one / *zero;
if (posinf <= *one) {
ret_val = 0;
return ret_val;
}
neginf = - (*one) / *zero;
if (neginf >= *zero) {
ret_val = 0;
return ret_val;
}
negzro = *one / (neginf + *one);
if (negzro != *zero) {
ret_val = 0;
return ret_val;
}
neginf = *one / negzro;
if (neginf >= *zero) {
ret_val = 0;
return ret_val;
}
newzro = negzro + *zero;
if (newzro != *zero) {
ret_val = 0;
return ret_val;
}
posinf = *one / newzro;
if (posinf <= *one) {
ret_val = 0;
return ret_val;
}
neginf *= posinf;
if (neginf >= *zero) {
ret_val = 0;
return ret_val;
}
posinf *= posinf;
if (posinf <= *one) {
ret_val = 0;
return ret_val;
}
/* Return if we were only asked to check infinity arithmetic */
if (*ispec == 0) {
return ret_val;
}
nan1 = posinf + neginf;
nan2 = posinf / neginf;
nan3 = posinf / posinf;
nan4 = posinf * *zero;
nan5 = neginf * negzro;
nan6 = nan5 * 0.f;
if (nan1 == nan1) {
ret_val = 0;
return ret_val;
}
if (nan2 == nan2) {
ret_val = 0;
return ret_val;
}
if (nan3 == nan3) {
ret_val = 0;
return ret_val;
}
if (nan4 == nan4) {
ret_val = 0;
return ret_val;
}
if (nan5 == nan5) {
ret_val = 0;
return ret_val;
}
if (nan6 == nan6) {
ret_val = 0;
return ret_val;
}
return ret_val;
} /* NUMlapack_ieeeck */
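/* NUMlapack_ilaenv: translation of the reference LAPACK routine ILAENV.
   Returns machine- and routine-dependent tuning parameters: ISPEC = 1..3
   give the block size, the minimum block size and the blocking crossover
   point for the routine named in NAME__; higher ISPEC values give the shift
   counts and crossover points handled at the corresponding labels below. */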
integer NUMlapack_ilaenv (integer *ispec, const char *name__, const char *opts, integer *n1, integer *n2, integer *n3, integer *n4,
integer name_len, integer opts_len) {
/* Table of constant values */
static integer c__0 = 0;
static float c_b162 = 0.f;
static float c_b163 = 1.f;
static integer c__1 = 1;
/* System generated locals */
integer ret_val;
/* Local variables */
static integer i__;
static integer cname, sname;
static integer nbmin;
static char c1[1], c2[2], c3[3], c4[2];
static integer ic, nb;
static integer iz, nx;
static char subnam[6];
(void) opts;
(void) n3;
(void) opts_len;
switch (*ispec) {
case 1:
goto L100;
case 2:
goto L100;
case 3:
goto L100;
case 4:
goto L400;
case 5:
goto L500;
case 6:
goto L600;
case 7:
goto L700;
case 8:
goto L800;
case 9:
goto L900;
case 10:
goto L1000;
case 11:
goto L1100;
}
/* Invalid value for ISPEC */
ret_val = -1;
return ret_val;
L100:
/* Convert NAME to upper case if the first character is lower case. */
ret_val = 1;
s_copy (subnam, (char *) name__, 6, name_len);
ic = * (unsigned char *) subnam;
iz = 'Z';
if (iz == 90 || iz == 122) {
/* ASCII character set */
if (ic >= 97 && ic <= 122) {
* (unsigned char *) subnam = (char) (ic - 32);
for (i__ = 2; i__ <= 6; ++i__) {
ic = * (unsigned char *) &subnam[i__ - 1];
if (ic >= 97 && ic <= 122) {
* (unsigned char *) &subnam[i__ - 1] = (char) (ic - 32);
}
/* L10: */
}
}
} else if (iz == 233 || iz == 169) {
/* EBCDIC character set */
if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) {
* (unsigned char *) subnam = (char) (ic + 64);
for (i__ = 2; i__ <= 6; ++i__) {
ic = * (unsigned char *) &subnam[i__ - 1];
if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) {
* (unsigned char *) &subnam[i__ - 1] = (char) (ic + 64);
}
/* L20: */
}
}
} else if (iz == 218 || iz == 250) {
/* Prime machines: ASCII+128 */
if (ic >= 225 && ic <= 250) {
* (unsigned char *) subnam = (char) (ic - 32);
for (i__ = 2; i__ <= 6; ++i__) {
ic = * (unsigned char *) &subnam[i__ - 1];
if (ic >= 225 && ic <= 250) {
* (unsigned char *) &subnam[i__ - 1] = (char) (ic - 32);
}
/* L30: */
}
}
}
* (unsigned char *) c1 = * (unsigned char *) subnam;
sname = * (unsigned char *) c1 == 'S' || * (unsigned char *) c1 == 'D';
cname = * (unsigned char *) c1 == 'C' || * (unsigned char *) c1 == 'Z';
if (! (cname || sname)) {
return ret_val;
}
s_copy (c2, subnam + 1, 2, 2);
s_copy (c3, subnam + 3, 3, 3);
s_copy (c4, c3 + 1, 2, 2);
switch (*ispec) {
case 1:
goto L110;
case 2:
goto L200;
case 3:
goto L300;
}
L110:
/* ISPEC = 1: block size
In these examples, separate code is provided for setting NB for real
and complex. We assume that NB will take the same value in single or
double precision. */
nb = 1;
if (s_cmp (c2, "GE", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
} else if (s_cmp (c3, "QRF", 3, 3) == 0 || s_cmp (c3, "RQF", 3, 3) == 0 || s_cmp (c3, "LQF", 3, 3) == 0
|| s_cmp (c3, "QLF", 3, 3) == 0) {
if (sname) {
nb = 32;
} else {
nb = 32;
}
} else if (s_cmp (c3, "HRD", 3, 3) == 0) {
if (sname) {
nb = 32;
} else {
nb = 32;
}
} else if (s_cmp (c3, "BRD", 3, 3) == 0) {
if (sname) {
nb = 32;
} else {
nb = 32;
}
} else if (s_cmp (c3, "TRI", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
}
} else if (s_cmp (c2, "PO", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
}
} else if (s_cmp (c2, "SY", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
} else if (sname && s_cmp (c3, "TRD", 3, 3) == 0) {
nb = 32;
} else if (sname && s_cmp (c3, "GST", 3, 3) == 0) {
nb = 64;
}
} else if (cname && s_cmp (c2, "HE", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
nb = 64;
} else if (s_cmp (c3, "TRD", 3, 3) == 0) {
nb = 32;
} else if (s_cmp (c3, "GST", 3, 3) == 0) {
nb = 64;
}
} else if (sname && s_cmp (c2, "OR", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nb = 32;
}
} else if (* (unsigned char *) c3 == 'M') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nb = 32;
}
}
} else if (cname && s_cmp (c2, "UN", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nb = 32;
}
} else if (* (unsigned char *) c3 == 'M') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nb = 32;
}
}
} else if (s_cmp (c2, "GB", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
if (*n4 <= 64) {
nb = 1;
} else {
nb = 32;
}
} else {
if (*n4 <= 64) {
nb = 1;
} else {
nb = 32;
}
}
}
} else if (s_cmp (c2, "PB", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
if (*n2 <= 64) {
nb = 1;
} else {
nb = 32;
}
} else {
if (*n2 <= 64) {
nb = 1;
} else {
nb = 32;
}
}
}
} else if (s_cmp (c2, "TR", 2, 2) == 0) {
if (s_cmp (c3, "TRI", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
}
} else if (s_cmp (c2, "LA", 2, 2) == 0) {
if (s_cmp (c3, "UUM", 3, 3) == 0) {
if (sname) {
nb = 64;
} else {
nb = 64;
}
}
} else if (sname && s_cmp (c2, "ST", 2, 2) == 0) {
if (s_cmp (c3, "EBZ", 3, 3) == 0) {
nb = 1;
}
}
ret_val = nb;
return ret_val;
L200:
/* ISPEC = 2: minimum block size */
nbmin = 2;
if (s_cmp (c2, "GE", 2, 2) == 0) {
if (s_cmp (c3, "QRF", 3, 3) == 0 || s_cmp (c3, "RQF", 3, 3) == 0 || s_cmp (c3, "LQF", 3, 3) == 0 ||
s_cmp (c3, "QLF", 3, 3) == 0) {
if (sname) {
nbmin = 2;
} else {
nbmin = 2;
}
} else if (s_cmp (c3, "HRD", 3, 3) == 0) {
if (sname) {
nbmin = 2;
} else {
nbmin = 2;
}
} else if (s_cmp (c3, "BRD", 3, 3) == 0) {
if (sname) {
nbmin = 2;
} else {
nbmin = 2;
}
} else if (s_cmp (c3, "TRI", 3, 3) == 0) {
if (sname) {
nbmin = 2;
} else {
nbmin = 2;
}
}
} else if (s_cmp (c2, "SY", 2, 2) == 0) {
if (s_cmp (c3, "TRF", 3, 3) == 0) {
if (sname) {
nbmin = 8;
} else {
nbmin = 8;
}
} else if (sname && s_cmp (c3, "TRD", 3, 3) == 0) {
nbmin = 2;
}
} else if (cname && s_cmp (c2, "HE", 2, 2) == 0) {
if (s_cmp (c3, "TRD", 3, 3) == 0) {
nbmin = 2;
}
} else if (sname && s_cmp (c2, "OR", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nbmin = 2;
}
} else if (* (unsigned char *) c3 == 'M') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nbmin = 2;
}
}
} else if (cname && s_cmp (c2, "UN", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nbmin = 2;
}
} else if (* (unsigned char *) c3 == 'M') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nbmin = 2;
}
}
}
ret_val = nbmin;
return ret_val;
L300:
/* ISPEC = 3: crossover point */
nx = 0;
if (s_cmp (c2, "GE", 2, 2) == 0) {
if (s_cmp (c3, "QRF", 3, 3) == 0 || s_cmp (c3, "RQF", 3, 3) == 0 || s_cmp (c3, "LQF", 3, 3) == 0 ||
s_cmp (c3, "QLF", 3, 3) == 0) {
if (sname) {
nx = 128;
} else {
nx = 128;
}
} else if (s_cmp (c3, "HRD", 3, 3) == 0) {
if (sname) {
nx = 128;
} else {
nx = 128;
}
} else if (s_cmp (c3, "BRD", 3, 3) == 0) {
if (sname) {
nx = 128;
} else {
nx = 128;
}
}
} else if (s_cmp (c2, "SY", 2, 2) == 0) {
if (sname && s_cmp (c3, "TRD", 3, 3) == 0) {
nx = 32;
}
} else if (cname && s_cmp (c2, "HE", 2, 2) == 0) {
if (s_cmp (c3, "TRD", 3, 3) == 0) {
nx = 32;
}
} else if (sname && s_cmp (c2, "OR", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nx = 128;
}
}
} else if (cname && s_cmp (c2, "UN", 2, 2) == 0) {
if (* (unsigned char *) c3 == 'G') {
if (s_cmp (c4, "QR", 2, 2) == 0 || s_cmp (c4, "RQ", 2, 2) == 0 || s_cmp (c4, "LQ", 2, 2) == 0 ||
s_cmp (c4, "QL", 2, 2) == 0 || s_cmp (c4, "HR", 2, 2) == 0 || s_cmp (c4, "TR", 2, 2) == 0 ||
s_cmp (c4, "BR", 2, 2) == 0) {
nx = 128;
}
}
}
ret_val = nx;
return ret_val;
L400:
/* ISPEC = 4: number of shifts (used by xHSEQR) */
ret_val = 6;
return ret_val;
L500:
/* ISPEC = 5: minimum column dimension (not used) */
ret_val = 2;
return ret_val;
L600:
/* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */
ret_val = (integer) ( (float) MIN (*n1, *n2) * 1.6f);
return ret_val;
L700:
/* ISPEC = 7: number of processors (not used) */
ret_val = 1;
return ret_val;
L800:
/* ISPEC = 8: crossover point for multishift (used by xHSEQR) */
ret_val = 50;
return ret_val;
L900:
/* ISPEC = 9: maximum size of the subproblems at the bottom of the
computation tree in the divide-and-conquer algorithm (used by xGELSD
and xGESDD) */
ret_val = 25;
return ret_val;
L1000:
/* ISPEC = 10: ieee NaN arithmetic can be trusted not to trap
ILAENV = 0 */
ret_val = 1;
if (ret_val == 1) {
ret_val = NUMlapack_ieeeck (&c__0, &c_b162, &c_b163);
}
return ret_val;
L1100:
/* ISPEC = 11: infinity arithmetic can be trusted not to trap
ILAENV = 0 */
ret_val = 1;
if (ret_val == 1) {
ret_val = NUMlapack_ieeeck (&c__1, &c_b162, &c_b163);
}
return ret_val;
} /* NUMlapack_ilaenv */
#undef a_ref
#undef c___ref
#undef MAX
#undef MIN
/* End of file NUMclapack.cpp */