rndis_filter.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

static void rndis_set_multicast(struct work_struct *w);

#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
	struct list_head list_ent;
	struct completion wait_event;

	struct rndis_message response_msg;
	/*
	 * The buffer for extended info after the RNDIS response message. It's
	 * referenced based on the data offset in the RNDIS message. Its size
	 * is enough for current needs, and should be sufficient for the near
	 * future.
	 */
	u8 response_ext[RNDIS_EXT_LEN];

	/* Simplify allocation by having a netvsc packet inline */
	struct hv_netvsc_packet pkt;

	struct rndis_message request_msg;
	/*
	 * The buffer for the extended info after the RNDIS request message.
	 * It is referenced and sized in a similar way as response_ext.
	 */
	u8 request_ext[RNDIS_EXT_LEN];
};
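/*
 * Default RSS hash key used when none is supplied from user space; this
 * appears to be the commonly used Toeplitz sample key from the Microsoft
 * RSS documentation.
 */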
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
static struct rndis_device *get_rndis_device(void)
{
	struct rndis_device *device;

	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
	if (!device)
		return NULL;

	spin_lock_init(&device->request_lock);

	INIT_LIST_HEAD(&device->req_list);

	INIT_WORK(&device->mcast_work, rndis_set_multicast);

	device->state = RNDIS_DEV_UNINITIALIZED;

	return device;
}
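/*
 * Allocate a control request, stamp it with a fresh request id and queue it
 * on req_list so the completion handler can match the host's response to it.
 */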
static struct rndis_request *get_rndis_request(struct rndis_device *dev,
					       u32 msg_type,
					       u32 msg_len)
{
	struct rndis_request *request;
	struct rndis_message *rndis_msg;
	struct rndis_set_request *set;
	unsigned long flags;

	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
	if (!request)
		return NULL;

	init_completion(&request->wait_event);

	rndis_msg = &request->request_msg;
	rndis_msg->ndis_msg_type = msg_type;
	rndis_msg->msg_len = msg_len;

	request->pkt.q_idx = 0;

	/*
	 * Set the request id. This field is always after the rndis header for
	 * request/response packet types so we just used the SetRequest as a
	 * template
	 */
	set = &rndis_msg->msg.set_req;
	set->req_id = atomic_inc_return(&dev->new_req_id);

	/* Add to the request list */
	spin_lock_irqsave(&dev->request_lock, flags);
	list_add_tail(&request->list_ent, &dev->req_list);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	return request;
}
static void put_rndis_request(struct rndis_device *dev,
			      struct rndis_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->request_lock, flags);
	list_del(&req->list_ent);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	kfree(req);
}
static void dump_rndis_message(struct net_device *netdev,
			       const struct rndis_message *rndis_msg)
{
	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
			   "data offset %u data len %u, # oob %u, "
			   "oob offset %u, oob len %u, pkt offset %u, "
			   "pkt len %u\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.pkt.data_offset,
			   rndis_msg->msg.pkt.data_len,
			   rndis_msg->msg.pkt.num_oob_data_elements,
			   rndis_msg->msg.pkt.oob_data_offset,
			   rndis_msg->msg.pkt.oob_data_len,
			   rndis_msg->msg.pkt.per_pkt_info_offset,
			   rndis_msg->msg.pkt.per_pkt_info_len);
		break;

	case RNDIS_MSG_INIT_C:
		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
			   "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
			   "device flags %d, max xfer size 0x%x, max pkts %u, "
			   "pkt aligned %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.init_complete.req_id,
			   rndis_msg->msg.init_complete.status,
			   rndis_msg->msg.init_complete.major_ver,
			   rndis_msg->msg.init_complete.minor_ver,
			   rndis_msg->msg.init_complete.dev_flags,
			   rndis_msg->msg.init_complete.max_xfer_size,
			   rndis_msg->msg.init_complete.max_pkt_per_msg,
			   rndis_msg->msg.init_complete.pkt_alignment_factor);
		break;

	case RNDIS_MSG_QUERY_C:
		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
			   "(len %u, id 0x%x, status 0x%x, buf len %u, "
			   "buf offset %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.query_complete.req_id,
			   rndis_msg->msg.query_complete.status,
			   rndis_msg->msg.query_complete.info_buflen,
			   rndis_msg->msg.query_complete.info_buf_offset);
		break;

	case RNDIS_MSG_SET_C:
		netdev_dbg(netdev,
			   "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.set_complete.req_id,
			   rndis_msg->msg.set_complete.status);
		break;

	case RNDIS_MSG_INDICATE:
		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
			   "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.indicate_status.status,
			   rndis_msg->msg.indicate_status.status_buflen,
			   rndis_msg->msg.indicate_status.status_buf_offset);
		break;

	default:
		netdev_dbg(netdev, "0x%x (len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		break;
	}
}
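/*
 * The request message lives inside struct rndis_request, so it is described
 * to the netvsc send path as one page buffer, or two when the message
 * happens to straddle a page boundary.
 */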
static int rndis_filter_send_request(struct rndis_device *dev,
				     struct rndis_request *req)
{
	struct hv_netvsc_packet *packet;
	struct hv_page_buffer page_buf[2];
	struct hv_page_buffer *pb = page_buf;
	int ret;

	/* Setup the packet to send it */
	packet = &req->pkt;

	packet->total_data_buflen = req->request_msg.msg_len;
	packet->page_buf_cnt = 1;

	pb[0].pfn = virt_to_phys(&req->request_msg) >> PAGE_SHIFT;
	pb[0].len = req->request_msg.msg_len;
	pb[0].offset = (unsigned long)&req->request_msg & (PAGE_SIZE - 1);

	/* Add one page_buf when request_msg crosses a page boundary */
	if (pb[0].offset + pb[0].len > PAGE_SIZE) {
		packet->page_buf_cnt++;
		pb[0].len = PAGE_SIZE - pb[0].offset;
		pb[1].pfn = virt_to_phys((void *)&req->request_msg
					 + pb[0].len) >> PAGE_SHIFT;
		pb[1].offset = 0;
		pb[1].len = req->request_msg.msg_len - pb[0].len;
	}

	trace_rndis_send(dev->ndev, 0, &req->request_msg);

	rcu_read_lock_bh();
	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
	rcu_read_unlock_bh();

	return ret;
}
static void rndis_set_link_state(struct rndis_device *rdev,
				 struct rndis_request *request)
{
	u32 link_status;
	struct rndis_query_complete *query_complete;

	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
	    query_complete->info_buflen == sizeof(u32)) {
		memcpy(&link_status, (void *)((unsigned long)query_complete +
		       query_complete->info_buf_offset), sizeof(u32));
		rdev->link_state = link_status != 0;
	}
}
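/*
 * Completion path for control requests: the response is matched against the
 * pending requests by request id, copied into that request's response buffer
 * and the waiter is woken via complete().
 */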
static void rndis_filter_receive_response(struct net_device *ndev,
					  struct netvsc_device *nvdev,
					  const struct rndis_message *resp)
{
	struct rndis_device *dev = nvdev->extension;
	struct rndis_request *request = NULL;
	bool found = false;
	unsigned long flags;

	/* This should never happen; it means a control message
	 * response was received after the device was removed.
	 */
	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
		netdev_err(ndev,
			   "got rndis message uninitialized\n");
		return;
	}

	spin_lock_irqsave(&dev->request_lock, flags);
	list_for_each_entry(request, &dev->req_list, list_ent) {
		/*
		 * All request/response messages contain the RequestId as
		 * their first field.
		 */
		if (request->request_msg.msg.init_req.req_id
		    == resp->msg.init_complete.req_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->request_lock, flags);

	if (found) {
		if (resp->msg_len <=
		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
			memcpy(&request->response_msg, resp,
			       resp->msg_len);
			if (request->request_msg.ndis_msg_type ==
			    RNDIS_MSG_QUERY &&
			    request->request_msg.msg.query_req.oid ==
			    RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
				rndis_set_link_state(dev, request);
		} else {
			netdev_err(ndev,
				   "rndis response buffer overflow "
				   "detected (size %u max %zu)\n",
				   resp->msg_len,
				   sizeof(struct rndis_message));

			if (resp->ndis_msg_type == RNDIS_MSG_RESET_C) {
				/* does not have a request id field */
				request->response_msg.msg.reset_complete.
					status = RNDIS_STATUS_BUFFER_OVERFLOW;
			} else {
				request->response_msg.msg.init_complete.
					status = RNDIS_STATUS_BUFFER_OVERFLOW;
			}
		}

		complete(&request->wait_event);
	} else {
		netdev_err(ndev,
			   "no rndis request found for this response "
			   "(id 0x%x res type 0x%x)\n",
			   resp->msg.init_complete.req_id,
			   resp->ndis_msg_type);
	}
}
/*
 * Get the Per-Packet-Info with the specified type
 * return NULL if not found.
 */
static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
				  u32 type, u8 internal)
{
	struct rndis_per_packet_info *ppi;
	int len;

	if (rpkt->per_pkt_info_offset == 0)
		return NULL;

	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
		rpkt->per_pkt_info_offset);
	len = rpkt->per_pkt_info_len;

	while (len > 0) {
		if (ppi->type == type && ppi->internal == internal)
			return (void *)((ulong)ppi + ppi->ppi_offset);
		len -= ppi->size;
		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
	}

	return NULL;
}
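/*
 * Accumulate one RSC (receive segment coalescing) fragment in the per-channel
 * rsc state; VLAN and checksum info are taken from the first fragment only.
 */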
static inline
void rsc_add_data(struct netvsc_channel *nvchan,
		  const struct ndis_pkt_8021q_info *vlan,
		  const struct ndis_tcp_ip_checksum_info *csum_info,
		  void *data, u32 len)
{
	u32 cnt = nvchan->rsc.cnt;

	if (cnt) {
		nvchan->rsc.pktlen += len;
	} else {
		nvchan->rsc.vlan = vlan;
		nvchan->rsc.csum_info = csum_info;
		nvchan->rsc.pktlen = len;
	}

	nvchan->rsc.data[cnt] = data;
	nvchan->rsc.len[cnt] = len;
	nvchan->rsc.cnt++;
}
static int rndis_filter_receive_data(struct net_device *ndev,
				     struct netvsc_device *nvdev,
				     struct netvsc_channel *nvchan,
				     struct rndis_message *msg,
				     u32 data_buflen)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	const struct ndis_tcp_ip_checksum_info *csum_info;
	const struct ndis_pkt_8021q_info *vlan;
	const struct rndis_pktinfo_id *pktinfo_id;
	u32 data_offset;
	void *data;
	bool rsc_more = false;
	int ret;

	/* Remove the rndis header and pass it back up the stack */
	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;

	data_buflen -= data_offset;

	/*
	 * Make sure we got a valid RNDIS message: total_data_buflen
	 * should be the data packet size plus the trailer padding size
	 */
	if (unlikely(data_buflen < rndis_pkt->data_len)) {
		netdev_err(ndev, "rndis message buffer "
			   "overflow detected (got %u, min %u)"
			   "...dropping this message!\n",
			   data_buflen, rndis_pkt->data_len);
		return NVSP_STAT_FAIL;
	}

	vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0);

	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);

	pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);

	data = (void *)msg + data_offset;

	/* Identify RSC frags, drop erroneous packets */
	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
			nvchan->rsc.cnt = 0;
		else if (nvchan->rsc.cnt == 0)
			goto drop;

		rsc_more = true;

		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
			rsc_more = false;

		if (rsc_more && nvchan->rsc.is_last)
			goto drop;
	} else {
		nvchan->rsc.cnt = 0;
	}

	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
		goto drop;

	/* Put data into per channel structure.
	 * Also, remove the rndis trailer padding from the rndis packet:
	 * rndis_pkt->data_len tells us the real data length, and we only
	 * copy the data packet to the stack, without the trailer padding.
	 */
	rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);

	if (rsc_more)
		return NVSP_STAT_SUCCESS;

	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
	nvchan->rsc.cnt = 0;

	return ret;

drop:
	/* Drop incomplete packet */
	nvchan->rsc.cnt = 0;
	return NVSP_STAT_FAIL;
}
int rndis_filter_receive(struct net_device *ndev,
			 struct netvsc_device *net_dev,
			 struct netvsc_channel *nvchan,
			 void *data, u32 buflen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct rndis_message *rndis_msg = data;

	if (netif_msg_rx_status(net_device_ctx))
		dump_rndis_message(ndev, rndis_msg);

	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		return rndis_filter_receive_data(ndev, net_dev, nvchan,
						 rndis_msg, buflen);
	case RNDIS_MSG_INIT_C:
	case RNDIS_MSG_QUERY_C:
	case RNDIS_MSG_SET_C:
		/* completion msgs */
		rndis_filter_receive_response(ndev, net_dev, rndis_msg);
		break;

	case RNDIS_MSG_INDICATE:
		/* notification msgs */
		netvsc_linkstatus_callback(ndev, rndis_msg);
		break;
	default:
		netdev_err(ndev,
			   "unhandled rndis message (type %u len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		return NVSP_STAT_FAIL;
	}

	return NVSP_STAT_SUCCESS;
}
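/*
 * Issue a synchronous RNDIS query: build the request, optionally append an
 * OID-specific capability header, send it and block on wait_event until the
 * matching completion arrives, then copy the result back to the caller.
 */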
static int rndis_filter_query_device(struct rndis_device *dev,
				     struct netvsc_device *nvdev,
				     u32 oid, void *result, u32 *result_size)
{
	struct rndis_request *request;
	u32 inresult_size = *result_size;
	struct rndis_query_request *query;
	struct rndis_query_complete *query_complete;
	int ret = 0;

	if (!result)
		return -EINVAL;

	*result_size = 0;

	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis query */
	query = &request->request_msg.msg.query_req;
	query->oid = oid;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->info_buflen = 0;
	query->dev_vc_handle = 0;

	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
		struct ndis_offload *hwcaps;
		u32 nvsp_version = nvdev->nvsp_version;
		u8 ndis_rev;
		size_t size;

		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
			size = NDIS_OFFLOAD_SIZE;
		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
			size = NDIS_OFFLOAD_SIZE_6_1;
		} else {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
			size = NDIS_OFFLOAD_SIZE_6_0;
		}

		request->request_msg.msg_len += size;
		query->info_buflen = size;
		hwcaps = (struct ndis_offload *)
			((unsigned long)query + query->info_buf_offset);

		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
		hwcaps->header.revision = ndis_rev;
		hwcaps->header.size = size;

	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
		struct ndis_recv_scale_cap *cap;

		request->request_msg.msg_len +=
			sizeof(struct ndis_recv_scale_cap);
		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
						     query->info_buf_offset);
		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
	}

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	/* Copy the response back */
	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->info_buflen > inresult_size) {
		ret = -1;
		goto cleanup;
	}

	memcpy(result,
	       (void *)((unsigned long)query_complete +
			query_complete->info_buf_offset),
	       query_complete->info_buflen);

	*result_size = query_complete->info_buflen;

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}
/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
		   struct ndis_offload *caps)
{
	u32 caps_len = sizeof(*caps);
	int ret;

	memset(caps, 0, sizeof(*caps));

	ret = rndis_filter_query_device(dev, net_device,
					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
					caps, &caps_len);
	if (ret)
		return ret;

	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
			    caps->header.type);
		return -EINVAL;
	}

	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
			    caps->header.revision);
		return -EINVAL;
	}

	if (caps->header.size > caps_len ||
	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
		netdev_warn(dev->ndev,
			    "invalid NDIS objsize %u, data size %u\n",
			    caps->header.size, caps_len);
		return -EINVAL;
	}

	return 0;
}
static int rndis_filter_query_device_mac(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = ETH_ALEN;

	return rndis_filter_query_device(dev, net_device,
					 RNDIS_OID_802_3_PERMANENT_ADDRESS,
					 dev->hw_mac_adr, &size);
}
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14

int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
				const char *mac)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_config_parameter_info *cpi;
	wchar_t *cfg_nwadr, *cfg_mac;
	struct rndis_set_complete *set_complete;
	char macstr[2*ETH_ALEN+1];
	u32 extlen = sizeof(struct rndis_config_parameter_info) +
		2*NWADR_STRLEN + 4*ETH_ALEN;
	int ret;

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	cpi = (struct rndis_config_parameter_info *)((ulong)set +
		set->info_buf_offset);
	cpi->parameter_name_offset =
		sizeof(struct rndis_config_parameter_info);
	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
	cpi->parameter_name_length = 2*NWADR_STRLEN;
	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
	cpi->parameter_value_offset =
		cpi->parameter_name_offset + cpi->parameter_name_length;
	/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
	cpi->parameter_value_length = 4*ETH_ALEN;

	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
			      cfg_nwadr, NWADR_STRLEN);
	if (ret < 0)
		goto cleanup;

	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
			      cfg_mac, 2*ETH_ALEN);
	if (ret < 0)
		goto cleanup;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS)
		ret = -EIO;

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
int
rndis_filter_set_offload_params(struct net_device *ndev,
				struct netvsc_device *nvdev,
				struct ndis_offload_params *req_offloads)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct ndis_offload_params *offload_params;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_offload_params);
	int ret;
	u32 vsp_version = nvdev->nvsp_version;

	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
		extlen = VERSION_4_OFFLOAD_SIZE;
		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
		 * UDP checksum offload.
		 */
		req_offloads->udp_ip_v4_csum = 0;
		req_offloads->udp_ip_v6_csum = 0;
	}

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	offload_params = (struct ndis_offload_params *)((ulong)set +
				set->info_buf_offset);
	*offload_params = *req_offloads;
	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
	offload_params->header.size = extlen;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
		netdev_err(ndev, "Failed to set offload on host side: 0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
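/*
 * The RSS set request carries, in order: the ndis_recv_scale_param header,
 * the 4 * ITAB_NUM byte indirection table and the NETVSC_HASH_KEYLEN byte
 * Toeplitz hash key, with the offsets of the latter two recorded in the
 * header.
 */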
static int rndis_set_rss_param_msg(struct rndis_device *rdev,
				   const u8 *rss_key, u16 flag)
{
	struct net_device *ndev = rdev->ndev;
	struct net_device_context *ndc = netdev_priv(ndev);
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_recv_scale_param) +
		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
	struct ndis_recv_scale_param *rssp;
	u32 *itab;
	u8 *keyp;
	int i, ret;

	request = get_rndis_request(
			rdev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	rssp = (struct ndis_recv_scale_param *)(set + 1);
	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
	rssp->flag = flag;
	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
			 NDIS_HASH_TCP_IPV6;
	rssp->indirect_tabsize = 4*ITAB_NUM;
	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
	rssp->hashkey_offset = rssp->indirect_taboffset +
			       rssp->indirect_tabsize;

	/* Set indirection table entries */
	itab = (u32 *)(rssp + 1);
	for (i = 0; i < ITAB_NUM; i++)
		itab[i] = ndc->rx_table[i];

	/* Set hash key values */
	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
	} else {
		netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
int rndis_filter_set_rss_param(struct rndis_device *rdev,
			       const u8 *rss_key)
{
	/* Disable RSS before change */
	rndis_set_rss_param_msg(rdev, rss_key,
				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);

	return rndis_set_rss_param_msg(rdev, rss_key, 0);
}
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
						 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_status;

	return rndis_filter_query_device(dev, net_device,
					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
					 &link_status, &size);
}

static int rndis_filter_query_link_speed(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_speed;
	struct net_device_context *ndc;
	int ret;

	ret = rndis_filter_query_device(dev, net_device,
					RNDIS_OID_GEN_LINK_SPEED,
					&link_speed, &size);

	if (!ret) {
		ndc = netdev_priv(dev->ndev);

		/* The link speed reported by the host is in units of
		 * 100 bps, so we convert it to Mbps here.
		 */
		ndc->speed = link_speed / 10000;
	}

	return ret;
}
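/*
 * Program the RNDIS receive packet filter (promiscuous, multicast, broadcast,
 * directed); this is a no-op when the requested filter matches the cached
 * one.
 */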
static int rndis_filter_set_packet_filter(struct rndis_device *dev,
					  u32 new_filter)
{
	struct rndis_request *request;
	struct rndis_set_request *set;
	int ret;

	if (dev->filter == new_filter)
		return 0;

	request = get_rndis_request(dev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
			sizeof(u32));
	if (!request)
		return -ENOMEM;

	/* Setup the rndis set */
	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
	set->info_buflen = sizeof(u32);
	set->info_buf_offset = sizeof(struct rndis_set_request);
	memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
	       &new_filter, sizeof(u32));

	ret = rndis_filter_send_request(dev, request);
	if (ret == 0) {
		wait_for_completion(&request->wait_event);
		dev->filter = new_filter;
	}

	put_rndis_request(dev, request);

	return ret;
}
static void rndis_set_multicast(struct work_struct *w)
{
	struct rndis_device *rdev
		= container_of(w, struct rndis_device, mcast_work);
	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
	unsigned int flags = rdev->ndev->flags;

	if (flags & IFF_PROMISC) {
		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
	} else {
		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
		if (flags & IFF_BROADCAST)
			filter |= NDIS_PACKET_TYPE_BROADCAST;
	}

	rndis_filter_set_packet_filter(rdev, filter);
}

void rndis_filter_update(struct netvsc_device *nvdev)
{
	struct rndis_device *rdev = nvdev->extension;

	schedule_work(&rdev->mcast_work);
}
static int rndis_filter_init_device(struct rndis_device *dev,
				    struct netvsc_device *nvdev)
{
	struct rndis_request *request;
	struct rndis_initialize_request *init;
	struct rndis_initialize_complete *init_complete;
	u32 status;
	int ret;

	request = get_rndis_request(dev, RNDIS_MSG_INIT,
			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis init request */
	init = &request->request_msg.msg.init_req;
	init->major_ver = RNDIS_MAJOR_VERSION;
	init->minor_ver = RNDIS_MINOR_VERSION;
	init->max_xfer_size = 0x4000;

	dev->state = RNDIS_DEV_INITIALIZING;

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0) {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		goto cleanup;
	}

	wait_for_completion(&request->wait_event);

	init_complete = &request->response_msg.msg.init_complete;
	status = init_complete->status;
	if (status == RNDIS_STATUS_SUCCESS) {
		dev->state = RNDIS_DEV_INITIALIZED;
		nvdev->max_pkt = init_complete->max_pkt_per_msg;
		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
		ret = 0;
	} else {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		ret = -EINVAL;
	}

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}
static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];

		if (nvchan->mrc.first != nvchan->mrc.next)
			return false;

		if (atomic_read(&nvchan->queue_sends) > 0)
			return false;
	}

	return true;
}
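/*
 * Send a best-effort RNDIS halt to the host, then mark the device as being
 * destroyed and wait until all in-flight sends and receive completions have
 * drained before the caller tears the channel down.
 */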
static void rndis_filter_halt_device(struct netvsc_device *nvdev,
				     struct rndis_device *dev)
{
	struct rndis_request *request;
	struct rndis_halt_request *halt;

	/* Attempt to do a rndis device halt */
	request = get_rndis_request(dev, RNDIS_MSG_HALT,
				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
	if (!request)
		goto cleanup;

	/* Setup the rndis halt request */
	halt = &request->request_msg.msg.halt_req;
	halt->req_id = atomic_inc_return(&dev->new_req_id);

	/* Ignore return since this msg is optional. */
	rndis_filter_send_request(dev, request);

	dev->state = RNDIS_DEV_UNINITIALIZED;

cleanup:
	nvdev->destroy = true;

	/* Force flag to be ordered before waiting */
	wmb();

	/* Wait for all send completions */
	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));

	if (request)
		put_rndis_request(dev, request);
}
static int rndis_filter_open_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_INITIALIZED)
		return 0;

	ret = rndis_filter_set_packet_filter(dev,
					     NDIS_PACKET_TYPE_BROADCAST |
					     NDIS_PACKET_TYPE_ALL_MULTICAST |
					     NDIS_PACKET_TYPE_DIRECTED);
	if (ret == 0)
		dev->state = RNDIS_DEV_DATAINITIALIZED;

	return ret;
}

static int rndis_filter_close_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
		return 0;

	/* Make sure rndis_set_multicast doesn't re-enable filter! */
	cancel_work_sync(&dev->mcast_work);

	ret = rndis_filter_set_packet_filter(dev, 0);
	if (ret == -ENODEV)
		ret = 0;

	if (ret == 0)
		dev->state = RNDIS_DEV_INITIALIZED;

	return ret;
}
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
	struct net_device *ndev =
		hv_get_drvdata(new_sc->primary_channel->device_obj);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *nvscdev;
	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	int ret;

	/* This is safe because this callback only happens when
	 * a new device is being set up and is waiting on channel_init_wait.
	 */
	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
	if (!nvscdev || chn_index >= nvscdev->num_chn)
		return;

	nvchan = nvscdev->chan_table + chn_index;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	/* Set the channel before opening. */
	nvchan->channel = new_sc;

	ret = vmbus_open(new_sc, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, nvchan);
	if (ret == 0)
		napi_enable(&nvchan->napi);
	else
		netdev_notice(ndev, "sub channel open failed: %d\n", ret);

	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
		wake_up(&nvscdev->subchan_open);
}
/* Open sub-channels after completing the handling of the device probe.
 * This breaks overlap of processing the host message for the
 * new primary channel with the initialization of sub-channels.
 */
int rndis_set_subchannel(struct net_device *ndev,
			 struct netvsc_device *nvdev,
			 struct netvsc_device_info *dev_info)
{
	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hv_dev = ndev_ctx->device_ctx;
	struct rndis_device *rdev = nvdev->extension;
	int i, ret;

	ASSERT_RTNL();

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
						nvdev->num_chn - 1;
	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
		return ret;
	}

	wait_for_completion(&nvdev->channel_init_wait);
	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "sub channel request failed\n");
		return -EIO;
	}

	nvdev->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	/* wait for all sub channels to open */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		ndev_ctx->tx_table[i] = i % nvdev->num_chn;

	/* ignore failures from setting rss parameters, still have channels */
	if (dev_info)
		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
	else
		rndis_filter_set_rss_param(rdev, netvsc_hash_key);

	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);

	return 0;
}
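/*
 * Translate the host's NDIS offload capabilities into netdev feature flags
 * (checksum, TSO, LRO) and push the matching ndis_offload_params back to the
 * host, clamping gso_max_size to what LSOv2 advertises.
 */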
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
				   struct netvsc_device *nvdev)
{
	struct net_device *net = rndis_device->ndev;
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	int ret;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
	if (ret != 0)
		return ret;

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum, always does in kernel */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Reset previously set hw_features flags */
	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
	net_device_ctx->tx_checksum_mask = 0;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features |= NETIF_F_RXCSUM;
	net->hw_features |= NETIF_F_SG;

	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;
		}

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
		}
	}

	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;
		}

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
		}
	}

	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
		net->hw_features |= NETIF_F_LRO;

		if (net->features & NETIF_F_LRO) {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		} else {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		}
	}

	/* In case some hw_features disappeared we need to remove them from
	 * net->features list as they're no longer supported.
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_gso_max_size(net, gso_max_size);

	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);

	return ret;
}
static void rndis_get_friendly_name(struct net_device *net,
				    struct rndis_device *rndis_device,
				    struct netvsc_device *net_device)
{
	ucs2_char_t wname[256];
	unsigned long len;
	u8 ifalias[256];
	u32 size;

	size = sizeof(wname);
	if (rndis_filter_query_device(rndis_device, net_device,
				      RNDIS_OID_GEN_FRIENDLY_NAME,
				      wname, &size) != 0)
		return;	/* ignore if host does not support */

	if (size == 0)
		return;	/* name not set */

	/* Convert Windows Unicode string to UTF-8 */
	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));

	/* ignore the default value from host */
	if (strcmp(ifalias, "Network Adapter") != 0)
		dev_set_alias(net, ifalias, len);
}
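/*
 * Device bring-up: create the netvsc channel, run the RNDIS INITIALIZE
 * handshake, query MTU, MAC address and offload capabilities, then size the
 * vRSS channel set from the host's receive-scale capabilities. The
 * sub-channels themselves are opened later via netvsc_sc_open() and
 * rndis_set_subchannel().
 */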
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
					      struct netvsc_device_info *device_info)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndc = netdev_priv(net);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	u32 mtu, size;
	u32 num_possible_rss_qs;
	int i, ret;

	rndis_device = get_rndis_device();
	if (!rndis_device)
		return ERR_PTR(-ENODEV);

	/* Let the inner driver handle this first to create the netvsc channel
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (RndisFilterOnReceive()) before this call is completed
	 */
	net_device = netvsc_device_add(dev, device_info);
	if (IS_ERR(net_device)) {
		kfree(rndis_device);
		return net_device;
	}

	/* Initialize the rndis device */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	/* Get the MTU from the host */
	size = sizeof(u32);
	ret = rndis_filter_query_device(rndis_device, net_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
					&mtu, &size);
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
		net->mtu = mtu;

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Get friendly name as ifalias */
	if (!net->ifalias)
		rndis_get_friendly_name(net, rndis_device, net_device);

	/* Query and set hardware capabilities */
	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	rndis_filter_query_device_link_status(rndis_device, net_device);

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   rndis_device->link_state ? "down" : "up");

	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		goto out;

	rndis_filter_query_link_speed(rndis_device, net_device);

	/* vRSS setup */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device, net_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)
		goto out;

	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
	num_possible_rss_qs = min_t(u32, num_online_cpus(),
				    rsscap.num_recv_que);

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);

	/* We will use the given number of channels if available. */
	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);

	if (!netif_is_rxfh_configured(net)) {
		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = ethtool_rxfh_indir_default(
						i, net_device->num_chn);
	}

	atomic_set(&net_device->open_chn, 1);
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	for (i = 1; i < net_device->num_chn; i++) {
		ret = netvsc_alloc_recv_comp_ring(net_device, i);
		if (ret) {
			while (--i != 0)
				vfree(net_device->chan_table[i].mrc.slots);
			goto out;
		}
	}

	for (i = 1; i < net_device->num_chn; i++)
		netif_napi_add(net, &net_device->chan_table[i].napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);

	return net_device;

out:
	/* setting up multiple channels failed */
	net_device->max_chn = 1;
	net_device->num_chn = 1;
	return net_device;

err_dev_remv:
	rndis_filter_device_remove(dev, net_device);
	return ERR_PTR(ret);
}
void rndis_filter_device_remove(struct hv_device *dev,
				struct netvsc_device *net_dev)
{
	struct rndis_device *rndis_dev = net_dev->extension;

	/* Halt and release the rndis device */
	rndis_filter_halt_device(net_dev, rndis_dev);

	netvsc_device_remove(dev);
}

int rndis_filter_open(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_open_device(nvdev->extension);
}

int rndis_filter_close(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_close_device(nvdev->extension);
}