- // SPDX-License-Identifier: GPL-2.0
- //
- // Cryptographic API.
- //
- // Support for Samsung S5PV210 and Exynos HW acceleration.
- //
- // Copyright (C) 2011 NetUP Inc. All rights reserved.
- // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- //
- // Hash part based on omap-sham.c driver.
- #include <linux/clk.h>
- #include <linux/crypto.h>
- #include <linux/dma-mapping.h>
- #include <linux/err.h>
- #include <linux/errno.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/scatterlist.h>
- #include <crypto/ctr.h>
- #include <crypto/aes.h>
- #include <crypto/algapi.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/hash.h>
- #include <crypto/md5.h>
- #include <crypto/sha.h>
- #include <crypto/internal/hash.h>
- #define _SBF(s, v) ((v) << (s))
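- /*
-  * Illustrative example (not part of the driver): _SBF() shifts a bitfield
-  * value into position, so e.g. _SBF(4, 0x02) expands to (0x02 << 4) == 0x20,
-  * which is how SSS_AES_KEY_SIZE_256 below selects the 256-bit key size.
-  */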
- /* Feed control registers */
- #define SSS_REG_FCINTSTAT 0x0000
- #define SSS_FCINTSTAT_HPARTINT BIT(7)
- #define SSS_FCINTSTAT_HDONEINT BIT(5)
- #define SSS_FCINTSTAT_BRDMAINT BIT(3)
- #define SSS_FCINTSTAT_BTDMAINT BIT(2)
- #define SSS_FCINTSTAT_HRDMAINT BIT(1)
- #define SSS_FCINTSTAT_PKDMAINT BIT(0)
- #define SSS_REG_FCINTENSET 0x0004
- #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
- #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
- #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
- #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
- #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
- #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
- #define SSS_REG_FCINTENCLR 0x0008
- #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
- #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
- #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
- #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
- #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
- #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
- #define SSS_REG_FCINTPEND 0x000C
- #define SSS_FCINTPEND_HPARTINTP BIT(7)
- #define SSS_FCINTPEND_HDONEINTP BIT(5)
- #define SSS_FCINTPEND_BRDMAINTP BIT(3)
- #define SSS_FCINTPEND_BTDMAINTP BIT(2)
- #define SSS_FCINTPEND_HRDMAINTP BIT(1)
- #define SSS_FCINTPEND_PKDMAINTP BIT(0)
- #define SSS_REG_FCFIFOSTAT 0x0010
- #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
- #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
- #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
- #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
- #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
- #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
- #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
- #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
- #define SSS_REG_FCFIFOCTRL 0x0014
- #define SSS_FCFIFOCTRL_DESSEL BIT(2)
- #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
- #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
- #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
- #define SSS_HASHIN_MASK _SBF(0, 0x03)
- #define SSS_REG_FCBRDMAS 0x0020
- #define SSS_REG_FCBRDMAL 0x0024
- #define SSS_REG_FCBRDMAC 0x0028
- #define SSS_FCBRDMAC_BYTESWAP BIT(1)
- #define SSS_FCBRDMAC_FLUSH BIT(0)
- #define SSS_REG_FCBTDMAS 0x0030
- #define SSS_REG_FCBTDMAL 0x0034
- #define SSS_REG_FCBTDMAC 0x0038
- #define SSS_FCBTDMAC_BYTESWAP BIT(1)
- #define SSS_FCBTDMAC_FLUSH BIT(0)
- #define SSS_REG_FCHRDMAS 0x0040
- #define SSS_REG_FCHRDMAL 0x0044
- #define SSS_REG_FCHRDMAC 0x0048
- #define SSS_FCHRDMAC_BYTESWAP BIT(1)
- #define SSS_FCHRDMAC_FLUSH BIT(0)
- #define SSS_REG_FCPKDMAS 0x0050
- #define SSS_REG_FCPKDMAL 0x0054
- #define SSS_REG_FCPKDMAC 0x0058
- #define SSS_FCPKDMAC_BYTESWAP BIT(3)
- #define SSS_FCPKDMAC_DESCEND BIT(2)
- #define SSS_FCPKDMAC_TRANSMIT BIT(1)
- #define SSS_FCPKDMAC_FLUSH BIT(0)
- #define SSS_REG_FCPKDMAO 0x005C
- /* AES registers */
- #define SSS_REG_AES_CONTROL 0x00
- #define SSS_AES_BYTESWAP_DI BIT(11)
- #define SSS_AES_BYTESWAP_DO BIT(10)
- #define SSS_AES_BYTESWAP_IV BIT(9)
- #define SSS_AES_BYTESWAP_CNT BIT(8)
- #define SSS_AES_BYTESWAP_KEY BIT(7)
- #define SSS_AES_KEY_CHANGE_MODE BIT(6)
- #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
- #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
- #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
- #define SSS_AES_FIFO_MODE BIT(3)
- #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
- #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
- #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
- #define SSS_AES_MODE_DECRYPT BIT(0)
- #define SSS_REG_AES_STATUS 0x04
- #define SSS_AES_BUSY BIT(2)
- #define SSS_AES_INPUT_READY BIT(1)
- #define SSS_AES_OUTPUT_READY BIT(0)
- #define SSS_REG_AES_IN_DATA(s) (0x10 + ((s) << 2))
- #define SSS_REG_AES_OUT_DATA(s) (0x20 + ((s) << 2))
- #define SSS_REG_AES_IV_DATA(s) (0x30 + ((s) << 2))
- #define SSS_REG_AES_CNT_DATA(s) (0x40 + ((s) << 2))
- #define SSS_REG_AES_KEY_DATA(s) (0x80 + ((s) << 2))
- #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
- #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
- #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
- #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
- #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
- SSS_AES_REG(dev, reg))
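- /*
-  * Illustrative expansion (not part of the driver): the accessors above
-  * token-paste the register name, so e.g.
-  *
-  *	SSS_WRITE(dev, FCINTPEND, val)
-  *
-  * expands to
-  *
-  *	__raw_writel((val), (dev)->ioaddr + SSS_REG_FCINTPEND)
-  *
-  * i.e. a raw 32-bit write at offset 0x000C from the SSS base.
-  */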
- /* HW engine modes */
- #define FLAGS_AES_DECRYPT BIT(0)
- #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
- #define FLAGS_AES_CBC _SBF(1, 0x01)
- #define FLAGS_AES_CTR _SBF(1, 0x02)
- #define AES_KEY_LEN 16
- #define CRYPTO_QUEUE_LEN 1
- /* HASH registers */
- #define SSS_REG_HASH_CTRL 0x00
- #define SSS_HASH_USER_IV_EN BIT(5)
- #define SSS_HASH_INIT_BIT BIT(4)
- #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
- #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
- #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
- #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
- #define SSS_REG_HASH_CTRL_PAUSE 0x04
- #define SSS_HASH_PAUSE BIT(0)
- #define SSS_REG_HASH_CTRL_FIFO 0x08
- #define SSS_HASH_FIFO_MODE_DMA BIT(0)
- #define SSS_HASH_FIFO_MODE_CPU 0
- #define SSS_REG_HASH_CTRL_SWAP 0x0C
- #define SSS_HASH_BYTESWAP_DI BIT(3)
- #define SSS_HASH_BYTESWAP_DO BIT(2)
- #define SSS_HASH_BYTESWAP_IV BIT(1)
- #define SSS_HASH_BYTESWAP_KEY BIT(0)
- #define SSS_REG_HASH_STATUS 0x10
- #define SSS_HASH_STATUS_MSG_DONE BIT(6)
- #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
- #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
- #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
- #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
- #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
- #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
- #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
- #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
- #define HASH_BLOCK_SIZE 64
- #define HASH_REG_SIZEOF 4
- #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
- #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
- #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
- /*
- * HASH bit numbers, used by the device; set in dev->hash_flags with
- * set_bit(), clear_bit() or tested with test_bit() or BIT(), to keep the
- * HASH state BUSY or FREE, or to signal state from the irq_handler to the
- * hash_tasklet. The SGS bits track memory allocated for the scatterlist
- * (see the illustrative sketch below the flag definitions).
- */
- #define HASH_FLAGS_BUSY 0
- #define HASH_FLAGS_FINAL 1
- #define HASH_FLAGS_DMA_ACTIVE 2
- #define HASH_FLAGS_OUTPUT_READY 3
- #define HASH_FLAGS_DMA_READY 4
- #define HASH_FLAGS_SGS_COPIED 5
- #define HASH_FLAGS_SGS_ALLOCED 6
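- /*
-  * Illustrative sketch of the flag protocol described above (these are bit
-  * numbers, not masks, hence set_bit()/test_bit() rather than |=):
-  *
-  *	set_bit(HASH_FLAGS_BUSY, &dev->hash_flags);	// claim the engine
-  *	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
-  *		...					// last part of message
-  *	dev->hash_flags &= ~BIT(HASH_FLAGS_BUSY);	// masks need BIT()
-  */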
- /* HASH HW constants */
- #define BUFLEN HASH_BLOCK_SIZE
- #define SSS_HASH_DMA_LEN_ALIGN 8
- #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
- #define SSS_HASH_QUEUE_LENGTH 10
- /**
- * struct samsung_aes_variant - platform specific SSS driver data
- * @aes_offset: AES register offset from SSS module's base.
- * @hash_offset: HASH register offset from SSS module's base.
- *
- * Specifies platform specific configuration of SSS module.
- * Note: A structure is used for driver-specific platform data to allow
- * future expansion of its usage.
- */
- struct samsung_aes_variant {
- unsigned int aes_offset;
- unsigned int hash_offset;
- };
- struct s5p_aes_reqctx {
- unsigned long mode;
- };
- struct s5p_aes_ctx {
- struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
- int keylen;
- };
- /**
- * struct s5p_aes_dev - Crypto device state container
- * @dev: Associated device
- * @clk: Clock for accessing hardware
- * @ioaddr: Mapped IO memory region
- * @aes_ioaddr: Per-variant offset for AES block IO memory
- * @irq_fc: Feed control interrupt line
- * @req: Crypto request currently handled by the device
- * @ctx: Configuration for currently handled crypto request
- * @sg_src: Scatter list with source data for currently handled block
- * in device. This is DMA-mapped into device.
- * @sg_dst: Scatter list with destination data for currently handled block
- * in device. This is DMA-mapped into device.
- * @sg_src_cpy: In case of unaligned access, copied scatter list
- * with source data.
- * @sg_dst_cpy: In case of unaligned access, copied scatter list
- * with destination data.
- * @tasklet: New request scheduling job
- * @queue: Crypto queue
- * @busy: Indicates whether the device is currently handling some request
- * and thus uses some of the fields from this state, such as:
- * req, ctx, sg_src/dst (and their copies). This essentially
- * protects against concurrent access to these fields.
- * @lock: Lock for protecting both access to device hardware registers
- * and fields related to current request (including the busy field).
- * @res: Resources for hash.
- * @io_hash_base: Per-variant offset for HASH block IO memory.
- * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
- * variable.
- * @hash_flags: Flags for current HASH op.
- * @hash_queue: Async hash queue.
- * @hash_tasklet: New HASH request scheduling job.
- * @xmit_buf: Buffer for current HASH request transfer into SSS block.
- * @hash_req: Current request sending to SSS HASH block.
- * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
- * @hash_sg_cnt: Counter for hash_sg_iter.
- *
- * @use_hash: true if HASH algs enabled
- */
- struct s5p_aes_dev {
- struct device *dev;
- struct clk *clk;
- void __iomem *ioaddr;
- void __iomem *aes_ioaddr;
- int irq_fc;
- struct ablkcipher_request *req;
- struct s5p_aes_ctx *ctx;
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
- struct scatterlist *sg_src_cpy;
- struct scatterlist *sg_dst_cpy;
- struct tasklet_struct tasklet;
- struct crypto_queue queue;
- bool busy;
- spinlock_t lock;
- struct resource *res;
- void __iomem *io_hash_base;
- spinlock_t hash_lock; /* protect hash_ vars */
- unsigned long hash_flags;
- struct crypto_queue hash_queue;
- struct tasklet_struct hash_tasklet;
- u8 xmit_buf[BUFLEN];
- struct ahash_request *hash_req;
- struct scatterlist *hash_sg_iter;
- unsigned int hash_sg_cnt;
- bool use_hash;
- };
- /**
- * struct s5p_hash_reqctx - HASH request context
- * @dd: Associated device
- * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
- * @digcnt: Number of bytes processed by HW (without buffer[] ones)
- * @digest: Digest message or IV for partial result
- * @nregs: Number of HW registers for digest or IV read/write
- * @engine: Bits for selecting type of HASH in SSS block
- * @sg: sg for DMA transfer
- * @sg_len: Length of sg for DMA transfer
- * @sgl[]: sg for joining buffer and req->src scatterlist
- * @skip: Skip offset in req->src for current op
- * @total: Total number of bytes for current request
- * @finup: Keep state for finup or final.
- * @error: Keep track of error.
- * @bufcnt: Number of bytes held in buffer[]
- * @buffer[]: For byte(s) from end of req->src in UPDATE op
- */
- struct s5p_hash_reqctx {
- struct s5p_aes_dev *dd;
- bool op_update;
- u64 digcnt;
- u8 digest[SHA256_DIGEST_SIZE];
- unsigned int nregs; /* digest_size / sizeof(reg) */
- u32 engine;
- struct scatterlist *sg;
- unsigned int sg_len;
- struct scatterlist sgl[2];
- unsigned int skip;
- unsigned int total;
- bool finup;
- bool error;
- u32 bufcnt;
- u8 buffer[0];
- };
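- /*
-  * Note (illustrative): buffer[0] above is a zero-length trailing array; the
-  * BUFLEN bytes behind it come from the request size declared in
-  * s5p_hash_cra_init_alg():
-  *
-  *	crypto_ahash_set_reqsize(tfm, sizeof(struct s5p_hash_reqctx) + BUFLEN);
-  *
-  * so ctx->buffer may hold up to one HASH_BLOCK_SIZE of not-yet-hashed bytes.
-  */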
- /**
- * struct s5p_hash_ctx - HASH transformation context
- * @dd: Associated device
- * @flags: Bits for algorithm HASH.
- * @fallback: Software transformation for zero message or size < BUFLEN.
- */
- struct s5p_hash_ctx {
- struct s5p_aes_dev *dd;
- unsigned long flags;
- struct crypto_shash *fallback;
- };
- static const struct samsung_aes_variant s5p_aes_data = {
- .aes_offset = 0x4000,
- .hash_offset = 0x6000,
- };
- static const struct samsung_aes_variant exynos_aes_data = {
- .aes_offset = 0x200,
- .hash_offset = 0x400,
- };
- static const struct of_device_id s5p_sss_dt_match[] = {
- {
- .compatible = "samsung,s5pv210-secss",
- .data = &s5p_aes_data,
- },
- {
- .compatible = "samsung,exynos4210-secss",
- .data = &exynos_aes_data,
- },
- { },
- };
- MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
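- /*
-  * Illustrative (assumed) device tree node matching the table above; the
-  * exact reg/interrupt/clock values are board specific:
-  *
-  *	sss@10830000 {
-  *		compatible = "samsung,exynos4210-secss";
-  *		reg = <0x10830000 0x300>;
-  *		interrupts = <0 112 0>;
-  *		clocks = <&clock CLK_SSS>;
-  *		clock-names = "secss";
-  *	};
-  */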
- static inline const struct samsung_aes_variant *find_s5p_sss_version
- (const struct platform_device *pdev)
- {
- if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
- const struct of_device_id *match;
- match = of_match_node(s5p_sss_dt_match,
- pdev->dev.of_node);
- return (const struct samsung_aes_variant *)match->data;
- }
- return (const struct samsung_aes_variant *)
- platform_get_device_id(pdev)->driver_data;
- }
- static struct s5p_aes_dev *s5p_dev;
- static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
- const struct scatterlist *sg)
- {
- SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
- SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
- }
- static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
- const struct scatterlist *sg)
- {
- SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
- SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
- }
- static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
- {
- int len;
- if (!*sg)
- return;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
- free_pages((unsigned long)sg_virt(*sg), get_order(len));
- kfree(*sg);
- *sg = NULL;
- }
- static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
- unsigned int nbytes, int out)
- {
- struct scatter_walk walk;
- if (!nbytes)
- return;
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
- }
- static void s5p_sg_done(struct s5p_aes_dev *dev)
- {
- if (dev->sg_dst_cpy) {
- dev_dbg(dev->dev,
- "Copying %d bytes of output data back to original place\n",
- dev->req->nbytes);
- s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
- dev->req->nbytes, 1);
- }
- s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
- s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
- }
- /* Calls the completion. Cannot be called with dev->lock held. */
- static void s5p_aes_complete(struct ablkcipher_request *req, int err)
- {
- req->base.complete(&req->base, err);
- }
- static void s5p_unset_outdata(struct s5p_aes_dev *dev)
- {
- dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
- }
- static void s5p_unset_indata(struct s5p_aes_dev *dev)
- {
- dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
- }
- static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
- {
- void *pages;
- int len;
- *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
- if (!*dst)
- return -ENOMEM;
- len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
- pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
- if (!pages) {
- kfree(*dst);
- *dst = NULL;
- return -ENOMEM;
- }
- s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
- sg_init_table(*dst, 1);
- sg_set_buf(*dst, pages, len);
- return 0;
- }
- static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
- {
- int err;
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
- dev->sg_dst = sg;
- err = 0;
- exit:
- return err;
- }
- static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
- {
- int err;
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
- dev->sg_src = sg;
- err = 0;
- exit:
- return err;
- }
- /*
- * Returns -ERRNO on error (mapping of new data failed).
- * On success returns:
- * - 0 if there is no more data,
- * - 1 if new transmitting (output) data is ready and its address+length
- * have to be written to device (by calling s5p_set_dma_outdata()).
- */
- static int s5p_aes_tx(struct s5p_aes_dev *dev)
- {
- int ret = 0;
- s5p_unset_outdata(dev);
- if (!sg_is_last(dev->sg_dst)) {
- ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (!ret)
- ret = 1;
- }
- return ret;
- }
- /*
- * Returns -ERRNO on error (mapping of new data failed).
- * On success returns:
- * - 0 if there is no more data,
- * - 1 if new receiving (input) data is ready and its address+length
- * have to be written to device (by calling s5p_set_dma_indata()).
- */
- static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
- {
- int ret = 0;
- s5p_unset_indata(dev);
- if (!sg_is_last(dev->sg_src)) {
- ret = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (!ret)
- ret = 1;
- }
- return ret;
- }
- static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
- {
- return __raw_readl(dd->io_hash_base + offset);
- }
- static inline void s5p_hash_write(struct s5p_aes_dev *dd,
- u32 offset, u32 value)
- {
- __raw_writel(value, dd->io_hash_base + offset);
- }
- /**
- * s5p_set_dma_hashdata() - start DMA with sg
- * @dev: device
- * @sg: scatterlist ready to DMA transmit
- */
- static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
- const struct scatterlist *sg)
- {
- dev->hash_sg_cnt--;
- SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
- SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
- }
- /**
- * s5p_hash_rx() - get next hash_sg_iter
- * @dev: device
- *
- * Return:
- * 2 if there is no more data and it is UPDATE op
- * 1 if new receiving (input) data is ready and can be written to device
- * 0 if there is no more data and it is FINAL op
- */
- static int s5p_hash_rx(struct s5p_aes_dev *dev)
- {
- if (dev->hash_sg_cnt > 0) {
- dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
- return 1;
- }
- set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
- if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
- return 0;
- return 2;
- }
- static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
- {
- struct platform_device *pdev = dev_id;
- struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- struct ablkcipher_request *req;
- int err_dma_tx = 0;
- int err_dma_rx = 0;
- int err_dma_hx = 0;
- bool tx_end = false;
- bool hx_end = false;
- unsigned long flags;
- uint32_t status;
- u32 st_bits;
- int err;
- spin_lock_irqsave(&dev->lock, flags);
- /*
- * Handle the rx or tx interrupt. If there is still data (the scatterlist
- * has not reached its end), then map the next scatterlist entry.
- * If such a mapping fails, s5p_aes_complete() should be called.
- *
- * If there is no more data in the tx scatter list, call s5p_aes_complete()
- * and schedule a new tasklet.
- *
- * Handle the hx interrupt. If there is still data, map the next entry.
- */
- status = SSS_READ(dev, FCINTSTAT);
- if (status & SSS_FCINTSTAT_BRDMAINT)
- err_dma_rx = s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT) {
- if (sg_is_last(dev->sg_dst))
- tx_end = true;
- err_dma_tx = s5p_aes_tx(dev);
- }
- if (status & SSS_FCINTSTAT_HRDMAINT)
- err_dma_hx = s5p_hash_rx(dev);
- st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
- SSS_FCINTSTAT_HRDMAINT);
- /* clear DMA bits */
- SSS_WRITE(dev, FCINTPEND, st_bits);
- /* clear HASH irq bits */
- if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
- /* cannot have both HPART and HDONE */
- if (status & SSS_FCINTSTAT_HPARTINT)
- st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
- if (status & SSS_FCINTSTAT_HDONEINT)
- st_bits = SSS_HASH_STATUS_MSG_DONE;
- set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
- s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
- hx_end = true;
- /* when DONE or PART, do not handle HASH DMA */
- err_dma_hx = 0;
- }
- if (err_dma_rx < 0) {
- err = err_dma_rx;
- goto error;
- }
- if (err_dma_tx < 0) {
- err = err_dma_tx;
- goto error;
- }
- if (tx_end) {
- s5p_sg_done(dev);
- if (err_dma_hx == 1)
- s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
- spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev->req, 0);
- /* Device is still busy */
- tasklet_schedule(&dev->tasklet);
- } else {
- /*
- * Writing length of DMA block (either receiving or
- * transmitting) will start the operation immediately, so this
- * should be done at the end (even after clearing pending
- * interrupts to not miss the interrupt).
- */
- if (err_dma_tx == 1)
- s5p_set_dma_outdata(dev, dev->sg_dst);
- if (err_dma_rx == 1)
- s5p_set_dma_indata(dev, dev->sg_src);
- if (err_dma_hx == 1)
- s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
- spin_unlock_irqrestore(&dev->lock, flags);
- }
- goto hash_irq_end;
- error:
- s5p_sg_done(dev);
- dev->busy = false;
- req = dev->req;
- if (err_dma_hx == 1)
- s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
- spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(req, err);
- hash_irq_end:
- /*
- * Note about the else-if branch below:
- * when hash_sg_iter reaches its end and it is an UPDATE op,
- * issue SSS_HASH_PAUSE and wait for the HPART irq
- */
- if (hx_end)
- tasklet_schedule(&dev->hash_tasklet);
- else if (err_dma_hx == 2)
- s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
- SSS_HASH_PAUSE);
- return IRQ_HANDLED;
- }
- /**
- * s5p_hash_read_msg() - read message or IV from HW
- * @req: AHASH request
- */
- static void s5p_hash_read_msg(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct s5p_aes_dev *dd = ctx->dd;
- u32 *hash = (u32 *)ctx->digest;
- unsigned int i;
- for (i = 0; i < ctx->nregs; i++)
- hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
- }
- /**
- * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
- * @dd: device
- * @ctx: request context
- */
- static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
- const struct s5p_hash_reqctx *ctx)
- {
- const u32 *hash = (const u32 *)ctx->digest;
- unsigned int i;
- for (i = 0; i < ctx->nregs; i++)
- s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
- }
- /**
- * s5p_hash_write_iv() - write IV for next partial/finup op.
- * @req: AHASH request
- */
- static void s5p_hash_write_iv(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- s5p_hash_write_ctx_iv(ctx->dd, ctx);
- }
- /**
- * s5p_hash_copy_result() - copy digest into req->result
- * @req: AHASH request
- */
- static void s5p_hash_copy_result(struct ahash_request *req)
- {
- const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- if (!req->result)
- return;
- memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
- }
- /**
- * s5p_hash_dma_flush() - flush HASH DMA
- * @dev: secss device
- */
- static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
- {
- SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
- }
- /**
- * s5p_hash_dma_enable() - enable DMA mode for HASH
- * @dev: secss device
- *
- * enable DMA mode for HASH
- */
- static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
- {
- s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
- }
- /**
- * s5p_hash_irq_disable() - disable irq HASH signals
- * @dev: secss device
- * @flags: bitfield with irq's to be disabled
- */
- static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
- {
- SSS_WRITE(dev, FCINTENCLR, flags);
- }
- /**
- * s5p_hash_irq_enable() - enable irq signals
- * @dev: secss device
- * @flags: bitfield with irq's to be enabled
- */
- static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
- {
- SSS_WRITE(dev, FCINTENSET, flags);
- }
- /**
- * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
- * @dev: secss device
- * @hashflow: HASH stream flow with/without crypto AES/DES
- */
- static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
- {
- unsigned long flags;
- u32 flow;
- spin_lock_irqsave(&dev->lock, flags);
- flow = SSS_READ(dev, FCFIFOCTRL);
- flow &= ~SSS_HASHIN_MASK;
- flow |= hashflow;
- SSS_WRITE(dev, FCFIFOCTRL, flow);
- spin_unlock_irqrestore(&dev->lock, flags);
- }
- /**
- * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
- * @dev: secss device
- * @hashflow: HASH stream flow with/without AES/DES
- *
- * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
- * enable HASH irq's HRDMA, HDONE, HPART
- */
- static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
- {
- s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
- SSS_FCINTENCLR_HDONEINTENCLR |
- SSS_FCINTENCLR_HPARTINTENCLR);
- s5p_hash_dma_flush(dev);
- s5p_hash_dma_enable(dev);
- s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
- s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
- SSS_FCINTENSET_HDONEINTENSET |
- SSS_FCINTENSET_HPARTINTENSET);
- }
- /**
- * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
- * @dd: secss device
- * @length: length for request
- * @final: true if final op
- *
- * Prepare the SSS HASH block for processing bytes in DMA mode. If it is
- * called after previous updates, fill up the IV words. For final, calculate
- * and set the lengths for HASH so SecSS can finalize the hash. For partial,
- * set the SSS HASH length to 2^63 so it will never be reached, and set
- * prelow and prehigh to zero.
- *
- * This function does not start DMA transfer.
- */
- static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
- bool final)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
- u32 prelow, prehigh, low, high;
- u32 configflags, swapflags;
- u64 tmplen;
- configflags = ctx->engine | SSS_HASH_INIT_BIT;
- if (likely(ctx->digcnt)) {
- s5p_hash_write_ctx_iv(dd, ctx);
- configflags |= SSS_HASH_USER_IV_EN;
- }
- if (final) {
- /* number of bytes for last part */
- low = length;
- high = 0;
- /* total number of bits prev hashed */
- tmplen = ctx->digcnt * 8;
- prelow = (u32)tmplen;
- prehigh = (u32)(tmplen >> 32);
- } else {
- prelow = 0;
- prehigh = 0;
- low = 0;
- high = BIT(31);
- }
- swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
- SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
- s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
- s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
- s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
- s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
- s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
- s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
- }
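- /*
-  * Worked example for s5p_hash_write_ctrl() (illustrative): if 128 bytes were
-  * already hashed (ctx->digcnt == 128) and the final part is 20 bytes, then
-  * low = 20, high = 0, and the previously hashed length in bits is
-  * 128 * 8 = 1024, so prelow = 1024 and prehigh = 0.
-  */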
- /**
- * s5p_hash_xmit_dma() - start DMA hash processing
- * @dd: secss device
- * @length: length for request
- * @final: true if final op
- *
- * Update digcnt here, as it is needed for finup/final op.
- */
- static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
- bool final)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
- unsigned int cnt;
- cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
- if (!cnt) {
- dev_err(dd->dev, "dma_map_sg error\n");
- ctx->error = true;
- return -EINVAL;
- }
- set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
- dd->hash_sg_iter = ctx->sg;
- dd->hash_sg_cnt = cnt;
- s5p_hash_write_ctrl(dd, length, final);
- ctx->digcnt += length;
- ctx->total -= length;
- /* catch last interrupt */
- if (final)
- set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
- s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
- return -EINPROGRESS;
- }
- /**
- * s5p_hash_copy_sgs() - copy request's bytes into new buffer
- * @ctx: request context
- * @sg: source scatterlist request
- * @new_len: number of bytes to process from sg
- *
- * Allocate a new buffer and copy the data for HASH into it. If xmit_buf was
- * filled, copy it first, then copy the data from sg into it. Prepare one
- * sgl[0] entry with the allocated buffer.
- *
- * Set a bit in dd->hash_flags so we can free it after the irq ends
- * processing.
- */
- static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
- struct scatterlist *sg, unsigned int new_len)
- {
- unsigned int pages, len;
- void *buf;
- len = new_len + ctx->bufcnt;
- pages = get_order(len);
- buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
- if (!buf) {
- dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
- ctx->error = true;
- return -ENOMEM;
- }
- if (ctx->bufcnt)
- memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
- scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
- new_len, 0);
- sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, buf, len);
- ctx->sg = ctx->sgl;
- ctx->sg_len = 1;
- ctx->bufcnt = 0;
- ctx->skip = 0;
- set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
- return 0;
- }
- /**
- * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
- * @ctx: request context
- * @sg: source scatterlist request
- * @new_len: number of bytes to process from sg
- *
- * Allocate a new scatterlist table and copy the data for HASH into it. If
- * xmit_buf was filled, prepare it first, then copy the page, length and
- * offset from the source sg into it, adjusting the beginning and/or end for
- * the skip offset and the hash_later value.
- *
- * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
- * it after irq ends processing.
- */
- static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
- struct scatterlist *sg, unsigned int new_len)
- {
- unsigned int skip = ctx->skip, n = sg_nents(sg);
- struct scatterlist *tmp;
- unsigned int len;
- if (ctx->bufcnt)
- n++;
- ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
- if (!ctx->sg) {
- ctx->error = true;
- return -ENOMEM;
- }
- sg_init_table(ctx->sg, n);
- tmp = ctx->sg;
- ctx->sg_len = 0;
- if (ctx->bufcnt) {
- sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
- tmp = sg_next(tmp);
- ctx->sg_len++;
- }
- while (sg && skip >= sg->length) {
- skip -= sg->length;
- sg = sg_next(sg);
- }
- while (sg && new_len) {
- len = sg->length - skip;
- if (new_len < len)
- len = new_len;
- new_len -= len;
- sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
- skip = 0;
- if (new_len <= 0)
- sg_mark_end(tmp);
- tmp = sg_next(tmp);
- ctx->sg_len++;
- sg = sg_next(sg);
- }
- set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
- return 0;
- }
- /**
- * s5p_hash_prepare_sgs() - prepare sg for processing
- * @ctx: request context
- * @sg: source scatterlist request
- * @nbytes: number of bytes to process from sg
- * @final: final flag
- *
- * Check two conditions: (1) if the buffers in sg have length-aligned data,
- * and (2) if the sg table has properly aligned elements (list_ok). If one of
- * these checks fails, then either (1) allocate a new buffer for the data
- * with s5p_hash_copy_sgs(), copy the data into this buffer and prepare the
- * request in sgl, or (2) allocate a new sg table and prepare the sg
- * elements.
- *
- * For digest or finup all conditions can be good, and we may not need any
- * fixes.
- */
- static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
- struct scatterlist *sg,
- unsigned int new_len, bool final)
- {
- unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
- bool aligned = true, list_ok = true;
- struct scatterlist *sg_tmp = sg;
- if (!sg || !sg->length || !new_len)
- return 0;
- if (skip || !final)
- list_ok = false;
- while (nbytes > 0 && sg_tmp) {
- n++;
- if (skip >= sg_tmp->length) {
- skip -= sg_tmp->length;
- if (!sg_tmp->length) {
- aligned = false;
- break;
- }
- } else {
- if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
- aligned = false;
- break;
- }
- if (nbytes < sg_tmp->length - skip) {
- list_ok = false;
- break;
- }
- nbytes -= sg_tmp->length - skip;
- skip = 0;
- }
- sg_tmp = sg_next(sg_tmp);
- }
- if (!aligned)
- return s5p_hash_copy_sgs(ctx, sg, new_len);
- else if (!list_ok)
- return s5p_hash_copy_sg_lists(ctx, sg, new_len);
- /*
- * Have aligned data from previous operation and/or current
- * Note: will enter here only if (digest or finup) and aligned
- */
- if (ctx->bufcnt) {
- ctx->sg_len = n;
- sg_init_table(ctx->sgl, 2);
- sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
- sg_chain(ctx->sgl, 2, sg);
- ctx->sg = ctx->sgl;
- ctx->sg_len++;
- } else {
- ctx->sg = sg;
- ctx->sg_len = n;
- }
- return 0;
- }
- /**
- * s5p_hash_prepare_request() - prepare request for processing
- * @req: AHASH request
- * @update: true if UPDATE op
- *
- * Note 1: we can have update flag _and_ final flag at the same time.
- * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when
- * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when
- * we have a final op
- */
- static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- bool final = ctx->finup;
- int xmit_len, hash_later, nbytes;
- int ret;
- if (update)
- nbytes = req->nbytes;
- else
- nbytes = 0;
- ctx->total = nbytes + ctx->bufcnt;
- if (!ctx->total)
- return 0;
- if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
- /* bytes left from previous request, so fill up to BUFLEN */
- int len = BUFLEN - ctx->bufcnt % BUFLEN;
- if (len > nbytes)
- len = nbytes;
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, len, 0);
- ctx->bufcnt += len;
- nbytes -= len;
- ctx->skip = len;
- } else {
- ctx->skip = 0;
- }
- if (ctx->bufcnt)
- memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
- xmit_len = ctx->total;
- if (final) {
- hash_later = 0;
- } else {
- if (IS_ALIGNED(xmit_len, BUFLEN))
- xmit_len -= BUFLEN;
- else
- xmit_len -= xmit_len & (BUFLEN - 1);
- hash_later = ctx->total - xmit_len;
- /* copy hash_later bytes from end of req->src */
- /* previous bytes are in xmit_buf, so no overwrite */
- scatterwalk_map_and_copy(ctx->buffer, req->src,
- req->nbytes - hash_later,
- hash_later, 0);
- }
- if (xmit_len > BUFLEN) {
- ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
- final);
- if (ret)
- return ret;
- } else {
- /* have buffered data only */
- if (unlikely(!ctx->bufcnt)) {
- /* first update didn't fill up buffer */
- scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
- 0, xmit_len, 0);
- }
- sg_init_table(ctx->sgl, 1);
- sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
- ctx->sg = ctx->sgl;
- ctx->sg_len = 1;
- }
- ctx->bufcnt = hash_later;
- if (!final)
- ctx->total = xmit_len;
- return 0;
- }
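- /*
-  * Worked example for s5p_hash_prepare_request() (illustrative): with
-  * BUFLEN == 64, ctx->bufcnt == 10 and an update of req->nbytes == 100, the
-  * first 54 bytes of req->src top up ctx->buffer (ctx->skip = 54), giving
-  * ctx->total == 110. For a non-final op, xmit_len is rounded down to 64,
-  * hash_later == 46, and those 46 trailing bytes are kept in ctx->buffer for
-  * the next call.
-  */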
- /**
- * s5p_hash_update_dma_stop() - unmap DMA
- * @dd: secss device
- *
- * Unmap scatterlist ctx->sg.
- */
- static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
- {
- const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
- dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
- clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
- }
- /**
- * s5p_hash_finish() - copy calculated digest to crypto layer
- * @req: AHASH request
- */
- static void s5p_hash_finish(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct s5p_aes_dev *dd = ctx->dd;
- if (ctx->digcnt)
- s5p_hash_copy_result(req);
- dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
- }
- /**
- * s5p_hash_finish_req() - finish request
- * @req: AHASH request
- * @err: error
- */
- static void s5p_hash_finish_req(struct ahash_request *req, int err)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct s5p_aes_dev *dd = ctx->dd;
- unsigned long flags;
- if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
- free_pages((unsigned long)sg_virt(ctx->sg),
- get_order(ctx->sg->length));
- if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
- kfree(ctx->sg);
- ctx->sg = NULL;
- dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
- BIT(HASH_FLAGS_SGS_COPIED));
- if (!err && !ctx->error) {
- s5p_hash_read_msg(req);
- if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
- s5p_hash_finish(req);
- } else {
- ctx->error = true;
- }
- spin_lock_irqsave(&dd->hash_lock, flags);
- dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
- BIT(HASH_FLAGS_DMA_READY) |
- BIT(HASH_FLAGS_OUTPUT_READY));
- spin_unlock_irqrestore(&dd->hash_lock, flags);
- if (req->base.complete)
- req->base.complete(&req->base, err);
- }
- /**
- * s5p_hash_handle_queue() - handle hash queue
- * @dd: device s5p_aes_dev
- * @req: AHASH request
- *
- * If req!=NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not set
- * on the device, then process the first request from dd->hash_queue
- *
- * Returns: see s5p_hash_final below.
- */
- static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
- struct ahash_request *req)
- {
- struct crypto_async_request *async_req, *backlog;
- struct s5p_hash_reqctx *ctx;
- unsigned long flags;
- int err = 0, ret = 0;
- retry:
- spin_lock_irqsave(&dd->hash_lock, flags);
- if (req)
- ret = ahash_enqueue_request(&dd->hash_queue, req);
- if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
- spin_unlock_irqrestore(&dd->hash_lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->hash_queue);
- async_req = crypto_dequeue_request(&dd->hash_queue);
- if (async_req)
- set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
- spin_unlock_irqrestore(&dd->hash_lock, flags);
- if (!async_req)
- return ret;
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req = ahash_request_cast(async_req);
- dd->hash_req = req;
- ctx = ahash_request_ctx(req);
- err = s5p_hash_prepare_request(req, ctx->op_update);
- if (err || !ctx->total)
- goto out;
- dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
- ctx->op_update, req->nbytes);
- s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
- if (ctx->digcnt)
- s5p_hash_write_iv(req); /* restore hash IV */
- if (ctx->op_update) { /* HASH_OP_UPDATE */
- err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
- if (err != -EINPROGRESS && ctx->finup && !ctx->error)
- /* no final() after finup() */
- err = s5p_hash_xmit_dma(dd, ctx->total, true);
- } else { /* HASH_OP_FINAL */
- err = s5p_hash_xmit_dma(dd, ctx->total, true);
- }
- out:
- if (err != -EINPROGRESS) {
- /* hash_tasklet_cb will not finish it, so do it here */
- s5p_hash_finish_req(req, err);
- req = NULL;
- /*
- * Execute next request immediately if there is anything
- * in queue.
- */
- goto retry;
- }
- return ret;
- }
- /**
- * s5p_hash_tasklet_cb() - hash tasklet
- * @data: ptr to s5p_aes_dev
- */
- static void s5p_hash_tasklet_cb(unsigned long data)
- {
- struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
- if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
- s5p_hash_handle_queue(dd, NULL);
- return;
- }
- if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
- if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
- &dd->hash_flags)) {
- s5p_hash_update_dma_stop(dd);
- }
- if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
- &dd->hash_flags)) {
- /* hash or semi-hash ready */
- clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
- }
- }
- return;
- finish:
- /* finish current request */
- s5p_hash_finish_req(dd->hash_req, 0);
- /* If we are not busy, process next req */
- if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
- s5p_hash_handle_queue(dd, NULL);
- }
- /**
- * s5p_hash_enqueue() - enqueue request
- * @req: AHASH request
- * @op: operation UPDATE (true) or FINAL (false)
- *
- * Returns: see s5p_hash_final below.
- */
- static int s5p_hash_enqueue(struct ahash_request *req, bool op)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- ctx->op_update = op;
- return s5p_hash_handle_queue(tctx->dd, req);
- }
- /**
- * s5p_hash_update() - process the hash input data
- * @req: AHASH request
- *
- * If request will fit in buffer, copy it and return immediately
- * else enqueue it with OP_UPDATE.
- *
- * Returns: see s5p_hash_final below.
- */
- static int s5p_hash_update(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- if (!req->nbytes)
- return 0;
- if (ctx->bufcnt + req->nbytes <= BUFLEN) {
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
- 0, req->nbytes, 0);
- ctx->bufcnt += req->nbytes;
- return 0;
- }
- return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
- }
- /**
- * s5p_hash_shash_digest() - calculate shash digest
- * @tfm: crypto transformation
- * @flags: tfm flags
- * @data: input data
- * @len: length of data
- * @out: output buffer
- */
- static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
- {
- SHASH_DESC_ON_STACK(shash, tfm);
- shash->tfm = tfm;
- shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_shash_digest(shash, data, len, out);
- }
- /**
- * s5p_hash_final_shash() - calculate shash digest
- * @req: AHASH request
- */
- static int s5p_hash_final_shash(struct ahash_request *req)
- {
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer, ctx->bufcnt, req->result);
- }
- /**
- * s5p_hash_final() - close up hash and calculate digest
- * @req: AHASH request
- *
- * Note: in final, req->src does not contain any data and req->nbytes can
- * be non-zero.
- *
- * If there were no input data processed yet and the buffered hash data is
- * less than BUFLEN (64) then calculate the final hash immediately by using
- * SW algorithm fallback.
- *
- * Otherwise enqueue the current AHASH request with the OP_FINAL operation
- * and finalize the hash message in HW. Note that if digcnt!=0 then there was
- * a previous update op, so there are always some buffered bytes in
- * ctx->buffer, which means that ctx->bufcnt!=0
- *
- * Returns:
- * 0 if the request has been processed immediately,
- * -EINPROGRESS if the operation has been queued for later execution or is
- * being processed by HW,
- * -EBUSY if the queue is full and the request should be resubmitted later,
- * other negative values denote an error.
- */
- static int s5p_hash_final(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- ctx->finup = true;
- if (ctx->error)
- return -EINVAL; /* uncompleted hash is not needed */
- if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
- return s5p_hash_final_shash(req);
- return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
- }
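- /*
-  * Illustrative caller sketch (assumed, not part of the driver): a user of
-  * the ahash API drives the entry points above through the generic crypto
-  * layer, e.g. (my_complete_cb/my_ctx are hypothetical):
-  *
-  *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
-  *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
-  *
-  *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-  *				   my_complete_cb, my_ctx);
-  *	ahash_request_set_crypt(req, src_sg, digest, nbytes);
-  *	ret = crypto_ahash_digest(req);
-  *	if (ret == -EINPROGRESS || ret == -EBUSY)
-  *		...	// completion is signalled via my_complete_cb
-  */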
- /**
- * s5p_hash_finup() - process last req->src and calculate digest
- * @req: AHASH request containing the last update data
- *
- * Return values: see s5p_hash_final above.
- */
- static int s5p_hash_finup(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- int err1, err2;
- ctx->finup = true;
- err1 = s5p_hash_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
- return err1;
- /*
- * final() always has to be called to clean up resources even if
- * update() failed (except when it returned -EINPROGRESS), or to
- * calculate the digest for a small amount of buffered data
- */
- err2 = s5p_hash_final(req);
- return err1 ?: err2;
- }
- /**
- * s5p_hash_init() - initialize AHASH request context
- * @req: AHASH request
- *
- * Init async hash request context.
- */
- static int s5p_hash_init(struct ahash_request *req)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
- ctx->dd = tctx->dd;
- ctx->error = false;
- ctx->finup = false;
- ctx->bufcnt = 0;
- ctx->digcnt = 0;
- ctx->total = 0;
- ctx->skip = 0;
- dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
- crypto_ahash_digestsize(tfm));
- switch (crypto_ahash_digestsize(tfm)) {
- case MD5_DIGEST_SIZE:
- ctx->engine = SSS_HASH_ENGINE_MD5;
- ctx->nregs = HASH_MD5_MAX_REG;
- break;
- case SHA1_DIGEST_SIZE:
- ctx->engine = SSS_HASH_ENGINE_SHA1;
- ctx->nregs = HASH_SHA1_MAX_REG;
- break;
- case SHA256_DIGEST_SIZE:
- ctx->engine = SSS_HASH_ENGINE_SHA256;
- ctx->nregs = HASH_SHA256_MAX_REG;
- break;
- default:
- ctx->error = true;
- return -EINVAL;
- }
- return 0;
- }
- /**
- * s5p_hash_digest - calculate digest from req->src
- * @req: AHASH request
- *
- * Return values: see s5p_hash_final above.
- */
- static int s5p_hash_digest(struct ahash_request *req)
- {
- return s5p_hash_init(req) ?: s5p_hash_finup(req);
- }
- /**
- * s5p_hash_cra_init_alg - init crypto alg transformation
- * @tfm: crypto transformation
- */
- static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
- {
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
- tctx->dd = s5p_dev;
- /* Allocate a fallback and abort if it fails. */
- tctx->fallback = crypto_alloc_shash(alg_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(tctx->fallback)) {
- pr_err("fallback alloc fails for '%s'\n", alg_name);
- return PTR_ERR(tctx->fallback);
- }
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct s5p_hash_reqctx) + BUFLEN);
- return 0;
- }
- /**
- * s5p_hash_cra_init - init crypto tfm
- * @tfm: crypto transformation
- */
- static int s5p_hash_cra_init(struct crypto_tfm *tfm)
- {
- return s5p_hash_cra_init_alg(tfm);
- }
- /**
- * s5p_hash_cra_exit - exit crypto tfm
- * @tfm: crypto transformation
- *
- * free allocated fallback
- */
- static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
- {
- struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
- crypto_free_shash(tctx->fallback);
- tctx->fallback = NULL;
- }
- /**
- * s5p_hash_export - export hash state
- * @req: AHASH request
- * @out: buffer for exported state
- */
- static int s5p_hash_export(struct ahash_request *req, void *out)
- {
- const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
- return 0;
- }
- /**
- * s5p_hash_import - import hash state
- * @req: AHASH request
- * @in: buffer with state to be imported from
- */
- static int s5p_hash_import(struct ahash_request *req, const void *in)
- {
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
- const struct s5p_hash_reqctx *ctx_in = in;
- memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
- if (ctx_in->bufcnt > BUFLEN) {
- ctx->error = true;
- return -EINVAL;
- }
- ctx->dd = tctx->dd;
- ctx->error = false;
- return 0;
- }
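- /*
-  * Illustrative usage of export/import (assumed caller code): the exported
-  * state is the request context plus the partial-block buffer (see
-  * halg.statesize below), so a caller can checkpoint and resume a hash:
-  *
-  *	char state[sizeof(struct s5p_hash_reqctx) + BUFLEN];
-  *
-  *	crypto_ahash_export(req, state);
-  *	...
-  *	crypto_ahash_import(req2, state);	// continue on another request
-  *	crypto_ahash_final(req2);
-  */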
- static struct ahash_alg algs_sha1_md5_sha256[] = {
- {
- .init = s5p_hash_init,
- .update = s5p_hash_update,
- .final = s5p_hash_final,
- .finup = s5p_hash_finup,
- .digest = s5p_hash_digest,
- .export = s5p_hash_export,
- .import = s5p_hash_import,
- .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "exynos-sha1",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = HASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_hash_cra_init,
- .cra_exit = s5p_hash_cra_exit,
- }
- },
- {
- .init = s5p_hash_init,
- .update = s5p_hash_update,
- .final = s5p_hash_final,
- .finup = s5p_hash_finup,
- .digest = s5p_hash_digest,
- .export = s5p_hash_export,
- .import = s5p_hash_import,
- .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
- .halg.digestsize = MD5_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "md5",
- .cra_driver_name = "exynos-md5",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = HASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_hash_cra_init,
- .cra_exit = s5p_hash_cra_exit,
- }
- },
- {
- .init = s5p_hash_init,
- .update = s5p_hash_update,
- .final = s5p_hash_final,
- .finup = s5p_hash_finup,
- .digest = s5p_hash_digest,
- .export = s5p_hash_export,
- .import = s5p_hash_import,
- .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "exynos-sha256",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = HASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_hash_cra_init,
- .cra_exit = s5p_hash_cra_exit,
- }
- }
- };
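- /*
-  * Illustrative note (assumed): the algorithms above are registered with the
-  * crypto API elsewhere in this driver (e.g. via crypto_register_ahash() at
-  * probe time), after which a crypto_alloc_ahash("sha256", 0, 0) request can
-  * be served by "exynos-sha256" when its priority wins the selection.
-  */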
- static void s5p_set_aes(struct s5p_aes_dev *dev,
- const uint8_t *key, const uint8_t *iv,
- unsigned int keylen)
- {
- void __iomem *keystart;
- if (iv)
- memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
- if (keylen == AES_KEYSIZE_256)
- keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
- else if (keylen == AES_KEYSIZE_192)
- keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
- else
- keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
- memcpy_toio(keystart, key, keylen);
- }
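- /*
-  * Illustrative layout note for s5p_set_aes(): the key register file is
-  * filled so that the key always ends at the same place, e.g. a 128-bit key
-  * is written at SSS_REG_AES_KEY_DATA(4) (offset 0x90), a 192-bit key at
-  * KEY_DATA(2) (offset 0x88) and a 256-bit key at KEY_DATA(0) (offset 0x80),
-  * all ending at offset 0x9F.
-  */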
- static bool s5p_is_sg_aligned(struct scatterlist *sg)
- {
- 	while (sg) {
- 		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
- 			return false;
- 		sg = sg_next(sg);
- 	}
-
- 	return true;
- }
-
- static int s5p_set_indata_start(struct s5p_aes_dev *dev,
- 				struct ablkcipher_request *req)
- {
- 	struct scatterlist *sg;
- 	int err;
-
- 	dev->sg_src_cpy = NULL;
- 	sg = req->src;
- 	if (!s5p_is_sg_aligned(sg)) {
- 		dev_dbg(dev->dev,
- 			"At least one unaligned source scatter list, making a copy\n");
- 		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
- 		if (err)
- 			return err;
-
- 		sg = dev->sg_src_cpy;
- 	}
-
- 	err = s5p_set_indata(dev, sg);
- 	if (err) {
- 		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
- 		return err;
- 	}
-
- 	return 0;
- }
-
- static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- 				 struct ablkcipher_request *req)
- {
- 	struct scatterlist *sg;
- 	int err;
-
- 	dev->sg_dst_cpy = NULL;
- 	sg = req->dst;
- 	if (!s5p_is_sg_aligned(sg)) {
- 		dev_dbg(dev->dev,
- 			"At least one unaligned dest scatter list, making a copy\n");
- 		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
- 		if (err)
- 			return err;
-
- 		sg = dev->sg_dst_cpy;
- 	}
-
- 	err = s5p_set_outdata(dev, sg);
- 	if (err) {
- 		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
- 		return err;
- 	}
-
- 	return 0;
- }
-
- static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
- {
- 	struct ablkcipher_request *req = dev->req;
- 	uint32_t aes_control;
- 	unsigned long flags;
- 	int err;
- 	u8 *iv;
-
- 	aes_control = SSS_AES_KEY_CHANGE_MODE;
- 	if (mode & FLAGS_AES_DECRYPT)
- 		aes_control |= SSS_AES_MODE_DECRYPT;
-
- 	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
- 		aes_control |= SSS_AES_CHAIN_MODE_CBC;
- 		iv = req->info;
- 	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
- 		aes_control |= SSS_AES_CHAIN_MODE_CTR;
- 		iv = req->info;
- 	} else {
- 		iv = NULL; /* AES_ECB */
- 	}
-
- 	if (dev->ctx->keylen == AES_KEYSIZE_192)
- 		aes_control |= SSS_AES_KEY_SIZE_192;
- 	else if (dev->ctx->keylen == AES_KEYSIZE_256)
- 		aes_control |= SSS_AES_KEY_SIZE_256;
-
- 	aes_control |= SSS_AES_FIFO_MODE;
-
- 	/* as a variant it is possible to use byte swapping on DMA side */
- 	aes_control |= SSS_AES_BYTESWAP_DI
- 		    |  SSS_AES_BYTESWAP_DO
- 		    |  SSS_AES_BYTESWAP_IV
- 		    |  SSS_AES_BYTESWAP_KEY
- 		    |  SSS_AES_BYTESWAP_CNT;
-
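- 	/*
- 	 * Mask the BRDMA/BTDMA interrupts while the transfer is being
- 	 * programmed; they are re-enabled once both DMA channels are set up.
- 	 */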
- 	spin_lock_irqsave(&dev->lock, flags);
- 	SSS_WRITE(dev, FCINTENCLR,
- 		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
- 	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
-
- 	err = s5p_set_indata_start(dev, req);
- 	if (err)
- 		goto indata_error;
-
- 	err = s5p_set_outdata_start(dev, req);
- 	if (err)
- 		goto outdata_error;
-
- 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- 	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
-
- 	s5p_set_dma_indata(dev, dev->sg_src);
- 	s5p_set_dma_outdata(dev, dev->sg_dst);
-
- 	SSS_WRITE(dev, FCINTENSET,
- 		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
-
- 	spin_unlock_irqrestore(&dev->lock, flags);
-
- 	return;
-
- outdata_error:
- 	s5p_unset_indata(dev);
-
- indata_error:
- 	s5p_sg_done(dev);
- 	dev->busy = false;
- 	spin_unlock_irqrestore(&dev->lock, flags);
- 	s5p_aes_complete(req, err);
- }
-
- static void s5p_tasklet_cb(unsigned long data)
- {
- 	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
- 	struct crypto_async_request *async_req, *backlog;
- 	struct s5p_aes_reqctx *reqctx;
- 	unsigned long flags;
-
- 	spin_lock_irqsave(&dev->lock, flags);
- 	backlog = crypto_get_backlog(&dev->queue);
- 	async_req = crypto_dequeue_request(&dev->queue);
-
- 	if (!async_req) {
- 		dev->busy = false;
- 		spin_unlock_irqrestore(&dev->lock, flags);
- 		return;
- 	}
- 	spin_unlock_irqrestore(&dev->lock, flags);
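-
- 	/* A backlogged request may now enter the queue proper; notify its owner. */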
- 	if (backlog)
- 		backlog->complete(backlog, -EINPROGRESS);
-
- 	dev->req = ablkcipher_request_cast(async_req);
- 	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
- 	reqctx = ablkcipher_request_ctx(dev->req);
-
- 	s5p_aes_crypt_start(dev, reqctx->mode);
- }
-
- static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
- 			      struct ablkcipher_request *req)
- {
- 	unsigned long flags;
- 	int err;
-
- 	spin_lock_irqsave(&dev->lock, flags);
- 	err = ablkcipher_enqueue_request(&dev->queue, req);
- 	if (dev->busy) {
- 		spin_unlock_irqrestore(&dev->lock, flags);
- 		goto exit;
- 	}
- 	dev->busy = true;
- 	spin_unlock_irqrestore(&dev->lock, flags);
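-
- 	/* The engine was idle: kick the tasklet to start processing the queue. */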
- 	tasklet_schedule(&dev->tasklet);
-
- exit:
- 	return err;
- }
-
- static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
- {
- 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- 	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- 	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- 	struct s5p_aes_dev *dev = ctx->dev;
-
- 	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- 		dev_err(dev->dev, "request size is not a multiple of the AES block size\n");
- 		return -EINVAL;
- 	}
-
- 	reqctx->mode = mode;
-
- 	return s5p_aes_handle_req(dev, req);
- }
-
- static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
- 			  const uint8_t *key, unsigned int keylen)
- {
- 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- 	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
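-
- 	/*
- 	 * Accept only the three standard AES key sizes. The key is cached
- 	 * here and written to the hardware in s5p_set_aes() at request time.
- 	 */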
- 	if (keylen != AES_KEYSIZE_128 &&
- 	    keylen != AES_KEYSIZE_192 &&
- 	    keylen != AES_KEYSIZE_256)
- 		return -EINVAL;
-
- 	memcpy(ctx->aes_key, key, keylen);
- 	ctx->keylen = keylen;
-
- 	return 0;
- }
-
- static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
- {
- 	return s5p_aes_crypt(req, 0);
- }
-
- static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
- {
- 	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
- }
-
- static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
- {
- 	return s5p_aes_crypt(req, FLAGS_AES_CBC);
- }
-
- static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
- {
- 	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
- }
-
- static int s5p_aes_cra_init(struct crypto_tfm *tfm)
- {
- 	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- 	ctx->dev = s5p_dev;
- 	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
-
- 	return 0;
- }
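-
- /*
-  * ECB and CBC share the same setkey; they differ only in the mode flags
-  * passed to s5p_aes_crypt() and in whether an IV is used.
-  */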
- static struct crypto_alg algs[] = {
- 	{
- 		.cra_name = "ecb(aes)",
- 		.cra_driver_name = "ecb-aes-s5p",
- 		.cra_priority = 100,
- 		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- 			     CRYPTO_ALG_ASYNC |
- 			     CRYPTO_ALG_KERN_DRIVER_ONLY,
- 		.cra_blocksize = AES_BLOCK_SIZE,
- 		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
- 		.cra_alignmask = 0x0f,
- 		.cra_type = &crypto_ablkcipher_type,
- 		.cra_module = THIS_MODULE,
- 		.cra_init = s5p_aes_cra_init,
- 		.cra_u.ablkcipher = {
- 			.min_keysize = AES_MIN_KEY_SIZE,
- 			.max_keysize = AES_MAX_KEY_SIZE,
- 			.setkey = s5p_aes_setkey,
- 			.encrypt = s5p_aes_ecb_encrypt,
- 			.decrypt = s5p_aes_ecb_decrypt,
- 		}
- 	},
- 	{
- 		.cra_name = "cbc(aes)",
- 		.cra_driver_name = "cbc-aes-s5p",
- 		.cra_priority = 100,
- 		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- 			     CRYPTO_ALG_ASYNC |
- 			     CRYPTO_ALG_KERN_DRIVER_ONLY,
- 		.cra_blocksize = AES_BLOCK_SIZE,
- 		.cra_ctxsize = sizeof(struct s5p_aes_ctx),
- 		.cra_alignmask = 0x0f,
- 		.cra_type = &crypto_ablkcipher_type,
- 		.cra_module = THIS_MODULE,
- 		.cra_init = s5p_aes_cra_init,
- 		.cra_u.ablkcipher = {
- 			.min_keysize = AES_MIN_KEY_SIZE,
- 			.max_keysize = AES_MAX_KEY_SIZE,
- 			.ivsize = AES_BLOCK_SIZE,
- 			.setkey = s5p_aes_setkey,
- 			.encrypt = s5p_aes_cbc_encrypt,
- 			.decrypt = s5p_aes_cbc_decrypt,
- 		}
- 	},
- };
-
- static int s5p_aes_probe(struct platform_device *pdev)
- {
- 	struct device *dev = &pdev->dev;
- 	int i, j, err = -ENODEV;
- 	const struct samsung_aes_variant *variant;
- 	struct s5p_aes_dev *pdata;
- 	struct resource *res;
- 	unsigned int hash_i;
-
- 	if (s5p_dev)
- 		return -EEXIST;
-
- 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- 	if (!pdata)
- 		return -ENOMEM;
-
- 	variant = find_s5p_sss_version(pdev);
- 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	if (!res)	/* res->end is dereferenced below, so fail early */
- 		return -EINVAL;
-
- 	/*
- 	 * Note: HASH and PRNG use the same registers in secss, so they must
- 	 * not overwrite each other. HASH is therefore dropped when
- 	 * CONFIG_EXYNOS_RNG is enabled. The MEM resource describes only the
- 	 * AES/DES region, so it is enlarged here to also cover the HASH
- 	 * registers.
- 	 */
- 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
- 		if (variant == &exynos_aes_data) {
- 			res->end += 0x300;
- 			pdata->use_hash = true;
- 		}
- 	}
-
- 	pdata->res = res;
- 	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- 	if (IS_ERR(pdata->ioaddr)) {
- 		if (!pdata->use_hash)
- 			return PTR_ERR(pdata->ioaddr);
- 		/* try AES without HASH */
- 		res->end -= 0x300;
- 		pdata->use_hash = false;
- 		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- 		if (IS_ERR(pdata->ioaddr))
- 			return PTR_ERR(pdata->ioaddr);
- 	}
-
- 	pdata->clk = devm_clk_get(dev, "secss");
- 	if (IS_ERR(pdata->clk)) {
- 		dev_err(dev, "failed to find secss clock source\n");
- 		return -ENOENT;
- 	}
-
- 	err = clk_prepare_enable(pdata->clk);
- 	if (err < 0) {
- 		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
- 		return err;
- 	}
-
- 	spin_lock_init(&pdata->lock);
- 	spin_lock_init(&pdata->hash_lock);
-
- 	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
- 	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
-
- 	pdata->irq_fc = platform_get_irq(pdev, 0);
- 	if (pdata->irq_fc < 0) {
- 		err = pdata->irq_fc;
- 		dev_warn(dev, "feed control interrupt is not available.\n");
- 		goto err_irq;
- 	}
-
- 	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
- 					s5p_aes_interrupt, IRQF_ONESHOT,
- 					pdev->name, pdev);
- 	if (err < 0) {
- 		dev_warn(dev, "requesting feed control interrupt failed: %d\n",
- 			 err);
- 		goto err_irq;
- 	}
-
- 	pdata->busy = false;
- 	pdata->dev = dev;
- 	platform_set_drvdata(pdev, pdata);
- 	s5p_dev = pdata;
-
- 	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
- 	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
-
- 	for (i = 0; i < ARRAY_SIZE(algs); i++) {
- 		err = crypto_register_alg(&algs[i]);
- 		if (err)
- 			goto err_algs;
- 	}
-
- 	if (pdata->use_hash) {
- 		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
- 			     (unsigned long)pdata);
- 		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
-
- 		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
- 		     hash_i++) {
- 			struct ahash_alg *alg;
-
- 			alg = &algs_sha1_md5_sha256[hash_i];
- 			err = crypto_register_ahash(alg);
- 			if (err) {
- 				dev_err(dev, "can't register '%s': %d\n",
- 					alg->halg.base.cra_driver_name, err);
- 				goto err_hash;
- 			}
- 		}
- 	}
-
- 	dev_info(dev, "s5p-sss driver registered\n");
-
- 	return 0;
-
- err_hash:
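- 	/* Unwind the hashes registered so far and shrink the MEM resource
- 	 * back to the AES-only window that was extended above.
- 	 */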
- 	for (j = hash_i - 1; j >= 0; j--)
- 		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
-
- 	tasklet_kill(&pdata->hash_tasklet);
- 	res->end -= 0x300;
-
- err_algs:
- 	if (i < ARRAY_SIZE(algs))
- 		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
- 			err);
-
- 	for (j = 0; j < i; j++)
- 		crypto_unregister_alg(&algs[j]);
-
- 	tasklet_kill(&pdata->tasklet);
-
- err_irq:
- 	clk_disable_unprepare(pdata->clk);
-
- 	s5p_dev = NULL;
-
- 	return err;
- }
-
- static int s5p_aes_remove(struct platform_device *pdev)
- {
- 	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
- 	int i;
-
- 	if (!pdata)
- 		return -ENODEV;
-
- 	for (i = 0; i < ARRAY_SIZE(algs); i++)
- 		crypto_unregister_alg(&algs[i]);
-
- 	tasklet_kill(&pdata->tasklet);
-
- 	if (pdata->use_hash) {
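- 		/* Unregister hashes in reverse order and give back the extra
- 		 * 0x300 bytes added to the MEM resource at probe time.
- 		 */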
- 		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
- 			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
-
- 		pdata->res->end -= 0x300;
- 		tasklet_kill(&pdata->hash_tasklet);
- 		pdata->use_hash = false;
- 	}
-
- 	clk_disable_unprepare(pdata->clk);
- 	s5p_dev = NULL;
-
- 	return 0;
- }
-
- static struct platform_driver s5p_aes_crypto = {
- 	.probe = s5p_aes_probe,
- 	.remove = s5p_aes_remove,
- 	.driver = {
- 		.name = "s5p-secss",
- 		.of_match_table = s5p_sss_dt_match,
- 	},
- };
-
- module_platform_driver(s5p_aes_crypto);
- MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
- MODULE_LICENSE("GPL v2");
- MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
- MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");