New upstream version 5.3.9~20191129
os_dep/linux/custom_gpio_linux.c (new file, 340 lines)
@@ -0,0 +1,340 @@
/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#include "drv_types.h"

#ifdef CONFIG_PLATFORM_SPRD

/* gspi func & GPIO define */
#include <mach/gpio.h> /* 0915 */
#include <mach/board.h>

#if !(defined ANDROID_2X)

#ifdef CONFIG_RTL8188E
#include <mach/regulator.h>
#include <linux/regulator/consumer.h>
#endif /* CONFIG_RTL8188E */

#ifndef GPIO_WIFI_POWER
#define GPIO_WIFI_POWER -1
#endif /* !GPIO_WIFI_POWER */

#ifndef GPIO_WIFI_RESET
#define GPIO_WIFI_RESET -1
#endif /* !GPIO_WIFI_RESET */

#ifndef GPIO_WIFI_PWDN
#define GPIO_WIFI_PWDN -1
#endif /* !GPIO_WIFI_PWDN */

#ifdef CONFIG_GSPI_HCI
extern unsigned int oob_irq;
#endif /* CONFIG_GSPI_HCI */

#ifdef CONFIG_SDIO_HCI
extern int rtw_mp_mode;
#endif /* CONFIG_SDIO_HCI */

int rtw_wifi_gpio_init(void)
{
#ifdef CONFIG_GSPI_HCI
    if (GPIO_WIFI_IRQ > 0) {
        gpio_request(GPIO_WIFI_IRQ, "oob_irq");
        gpio_direction_input(GPIO_WIFI_IRQ);

        oob_irq = gpio_to_irq(GPIO_WIFI_IRQ);

        RTW_INFO("%s oob_irq:%d\n", __func__, oob_irq);
    }
#endif
    if (GPIO_WIFI_RESET > 0)
        gpio_request(GPIO_WIFI_RESET, "wifi_rst");
    if (GPIO_WIFI_POWER > 0)
        gpio_request(GPIO_WIFI_POWER, "wifi_power");

#ifdef CONFIG_SDIO_HCI
#if (defined(CONFIG_RTL8723B)) && (MP_DRIVER == 1)
    if (rtw_mp_mode == 1) {
        RTW_INFO("%s GPIO_BT_RESET pin special for mp_test\n", __func__);
        if (GPIO_BT_RESET > 0)
            gpio_request(GPIO_BT_RESET, "bt_rst");
    }
#endif
#endif
    return 0;
}

int rtw_wifi_gpio_deinit(void)
{
#ifdef CONFIG_GSPI_HCI
    if (GPIO_WIFI_IRQ > 0)
        gpio_free(GPIO_WIFI_IRQ);
#endif
    if (GPIO_WIFI_RESET > 0)
        gpio_free(GPIO_WIFI_RESET);
    if (GPIO_WIFI_POWER > 0)
        gpio_free(GPIO_WIFI_POWER);

#ifdef CONFIG_SDIO_HCI
#if (defined(CONFIG_RTL8723B)) && (MP_DRIVER == 1)
    if (rtw_mp_mode == 1) {
        RTW_INFO("%s GPIO_BT_RESET pin special for mp_test\n", __func__);
        if (GPIO_BT_RESET > 0)
            gpio_free(GPIO_BT_RESET);
    }
#endif
#endif
    return 0;
}

/* Customer function to control hw specific wlan gpios */
void rtw_wifi_gpio_wlan_ctrl(int onoff)
{
    switch (onoff) {
    case WLAN_PWDN_OFF:
        RTW_INFO("%s: call customer specific GPIO(%d) to set wifi power down pin to 0\n",
             __FUNCTION__, GPIO_WIFI_RESET);

#ifndef CONFIG_DONT_BUS_SCAN
        if (GPIO_WIFI_RESET > 0)
            gpio_direction_output(GPIO_WIFI_RESET, 0);
#endif
        break;

    case WLAN_PWDN_ON:
        RTW_INFO("%s: call customer specific GPIO(%d) to set wifi power down pin to 1\n",
             __FUNCTION__, GPIO_WIFI_RESET);

        if (GPIO_WIFI_RESET > 0)
            gpio_direction_output(GPIO_WIFI_RESET, 1);
        break;

    case WLAN_POWER_OFF:
        break;

    case WLAN_POWER_ON:
        break;
#ifdef CONFIG_SDIO_HCI
#if (defined(CONFIG_RTL8723B)) && (MP_DRIVER == 1)
    case WLAN_BT_PWDN_OFF:
        if (rtw_mp_mode == 1) {
            RTW_INFO("%s: call customer specific GPIO to set wifi power down pin to 0\n",
                 __FUNCTION__);
            if (GPIO_BT_RESET > 0)
                gpio_direction_output(GPIO_BT_RESET, 0);
        }
        break;

    case WLAN_BT_PWDN_ON:
        if (rtw_mp_mode == 1) {
            RTW_INFO("%s: call customer specific GPIO to set wifi power down pin to 1 %x\n",
                 __FUNCTION__, GPIO_BT_RESET);

            if (GPIO_BT_RESET > 0)
                gpio_direction_output(GPIO_BT_RESET, 1);
        }
        break;
#endif
#endif
    }
}

#else /* ANDROID_2X */

#include <mach/ldo.h>

#ifdef CONFIG_RTL8188E
extern int sprd_3rdparty_gpio_wifi_power;
#endif
extern int sprd_3rdparty_gpio_wifi_pwd;
#if defined(CONFIG_RTL8723B)
extern int sprd_3rdparty_gpio_bt_reset;
#endif

int rtw_wifi_gpio_init(void)
{
#if defined(CONFIG_RTL8723B)
    if (sprd_3rdparty_gpio_bt_reset > 0)
        gpio_direction_output(sprd_3rdparty_gpio_bt_reset, 1);
#endif

    return 0;
}

int rtw_wifi_gpio_deinit(void)
{
    return 0;
}

/* Customer function to control hw specific wlan gpios */
void rtw_wifi_gpio_wlan_ctrl(int onoff)
{
    switch (onoff) {
    case WLAN_PWDN_OFF:
        RTW_INFO("%s: call customer specific GPIO to set wifi power down pin to 0\n",
             __FUNCTION__);
        if (sprd_3rdparty_gpio_wifi_pwd > 0)
            gpio_set_value(sprd_3rdparty_gpio_wifi_pwd, 0);

        if (sprd_3rdparty_gpio_wifi_pwd == 60) {
            RTW_INFO("%s: turn off VSIM2 2.8V\n", __func__);
            LDO_TurnOffLDO(LDO_LDO_SIM2);
        }
        break;

    case WLAN_PWDN_ON:
        RTW_INFO("%s: call customer specific GPIO to set wifi power down pin to 1\n",
             __FUNCTION__);
        if (sprd_3rdparty_gpio_wifi_pwd == 60) {
            RTW_INFO("%s: turn on VSIM2 2.8V\n", __func__);
            LDO_SetVoltLevel(LDO_LDO_SIM2, LDO_VOLT_LEVEL0);
            LDO_TurnOnLDO(LDO_LDO_SIM2);
        }
        if (sprd_3rdparty_gpio_wifi_pwd > 0)
            gpio_set_value(sprd_3rdparty_gpio_wifi_pwd, 1);
        break;

    case WLAN_POWER_OFF:
#ifdef CONFIG_RTL8188E
#ifdef CONFIG_WIF1_LDO
        RTW_INFO("%s: turn off VDD-WIFI1 1.2V\n", __FUNCTION__);
        LDO_TurnOffLDO(LDO_LDO_WIF1);
#endif /* CONFIG_WIF1_LDO */

        RTW_INFO("%s: turn off VDD-WIFI0 3.3V\n", __FUNCTION__);
        LDO_TurnOffLDO(LDO_LDO_WIF0);

        RTW_INFO("%s: call customer specific GPIO(%d) to turn off wifi power\n",
             __FUNCTION__, sprd_3rdparty_gpio_wifi_power);
        if (sprd_3rdparty_gpio_wifi_power != 65535)
            gpio_set_value(sprd_3rdparty_gpio_wifi_power, 0);
#endif
        break;

    case WLAN_POWER_ON:
#ifdef CONFIG_RTL8188E
        RTW_INFO("%s: call customer specific GPIO(%d) to turn on wifi power\n",
             __FUNCTION__, sprd_3rdparty_gpio_wifi_power);
        if (sprd_3rdparty_gpio_wifi_power != 65535)
            gpio_set_value(sprd_3rdparty_gpio_wifi_power, 1);

        RTW_INFO("%s: turn on VDD-WIFI0 3.3V\n", __FUNCTION__);
        LDO_TurnOnLDO(LDO_LDO_WIF0);
        LDO_SetVoltLevel(LDO_LDO_WIF0, LDO_VOLT_LEVEL1);

#ifdef CONFIG_WIF1_LDO
        RTW_INFO("%s: turn on VDD-WIFI1 1.2V\n", __func__);
        LDO_TurnOnLDO(LDO_LDO_WIF1);
        LDO_SetVoltLevel(LDO_LDO_WIF1, LDO_VOLT_LEVEL3);
#endif /* CONFIG_WIF1_LDO */
#endif
        break;

    case WLAN_BT_PWDN_OFF:
        RTW_INFO("%s: call customer specific GPIO to set bt power down pin to 0\n",
             __FUNCTION__);
#if defined(CONFIG_RTL8723B)
        if (sprd_3rdparty_gpio_bt_reset > 0)
            gpio_set_value(sprd_3rdparty_gpio_bt_reset, 0);
#endif
        break;

    case WLAN_BT_PWDN_ON:
        RTW_INFO("%s: call customer specific GPIO to set bt power down pin to 1\n",
             __FUNCTION__);
#if defined(CONFIG_RTL8723B)
        if (sprd_3rdparty_gpio_bt_reset > 0)
            gpio_set_value(sprd_3rdparty_gpio_bt_reset, 1);
#endif
        break;
    }
}
#endif /* ANDROID_2X */

#elif defined(CONFIG_PLATFORM_ARM_RK3066)
#include <mach/iomux.h>

#define GPIO_WIFI_IRQ RK30_PIN2_PC2
extern unsigned int oob_irq;
int rtw_wifi_gpio_init(void)
{
#ifdef CONFIG_GSPI_HCI
    if (GPIO_WIFI_IRQ > 0) {
        rk30_mux_api_set(GPIO2C2_LCDC1DATA18_SMCBLSN1_HSADCDATA5_NAME, GPIO2C_GPIO2C2); /* jacky_test */
        gpio_request(GPIO_WIFI_IRQ, "oob_irq");
        gpio_direction_input(GPIO_WIFI_IRQ);

        oob_irq = gpio_to_irq(GPIO_WIFI_IRQ);

        RTW_INFO("%s oob_irq:%d\n", __func__, oob_irq);
    }
#endif
    return 0;
}

int rtw_wifi_gpio_deinit(void)
{
#ifdef CONFIG_GSPI_HCI
    if (GPIO_WIFI_IRQ > 0)
        gpio_free(GPIO_WIFI_IRQ);
#endif
    return 0;
}

void rtw_wifi_gpio_wlan_ctrl(int onoff)
{
}

#ifdef CONFIG_GPIO_API
/* this is a demo for extending GPIO pin[7] as interrupt mode */
struct net_device *rtl_net;
extern int rtw_register_gpio_interrupt(struct net_device *netdev, int gpio_num, void (*callback)(u8 level));
extern int rtw_disable_gpio_interrupt(struct net_device *netdev, int gpio_num);
void gpio_int(u8 is_high)
{
    RTW_INFO("%s level=%d\n", __func__, is_high);
}
int register_net_gpio_init(void)
{
    rtl_net = dev_get_by_name(&init_net, "wlan0");
    if (!rtl_net) {
        RTW_PRINT("rtl_net init fail!\n");
        return -1;
    }
    return rtw_register_gpio_interrupt(rtl_net, 7, gpio_int);
}
int unregister_net_gpio_init(void)
{
    rtl_net = dev_get_by_name(&init_net, "wlan0");
    if (!rtl_net) {
        RTW_PRINT("rtl_net init fail!\n");
        return -1;
    }
    return rtw_disable_gpio_interrupt(rtl_net, 7);
}
#endif

#else

int rtw_wifi_gpio_init(void)
{
    return 0;
}

void rtw_wifi_gpio_wlan_ctrl(int onoff)
{
}
#endif /* CONFIG_PLATFORM_SPRD */
os_dep/linux/ioctl_cfg80211.c (new file, 9770 lines; diff suppressed because it is too large)
os_dep/linux/ioctl_cfg80211.h (new file, 398 lines)
@@ -0,0 +1,398 @@
/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef __IOCTL_CFG80211_H__
#define __IOCTL_CFG80211_H__

#define RTW_CFG80211_BLOCK_DISCON_WHEN_CONNECT BIT0
#define RTW_CFG80211_BLOCK_DISCON_WHEN_DISCONNECT BIT1

#ifndef RTW_CFG80211_BLOCK_STA_DISCON_EVENT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
#define RTW_CFG80211_BLOCK_STA_DISCON_EVENT (RTW_CFG80211_BLOCK_DISCON_WHEN_CONNECT)
#else
#define RTW_CFG80211_BLOCK_STA_DISCON_EVENT (RTW_CFG80211_BLOCK_DISCON_WHEN_CONNECT | RTW_CFG80211_BLOCK_DISCON_WHEN_DISCONNECT)
#endif
#endif

#if defined(RTW_USE_CFG80211_STA_EVENT)
#undef CONFIG_CFG80211_FORCE_COMPATIBLE_2_6_37_UNDER
#endif

#ifndef RTW_P2P_GROUP_INTERFACE
#define RTW_P2P_GROUP_INTERFACE 0
#endif

/*
 * (RTW_P2P_GROUP_INTERFACE, RTW_DEDICATED_P2P_DEVICE)
 * (0, 0): wlan0 + p2p0(PD+PG)
 * (1, 0): wlan0(with PD) + dynamic PGs
 * (1, 1): wlan0 (with dynamic PD wdev) + dynamic PGs
 */

#if RTW_P2P_GROUP_INTERFACE
#ifndef CONFIG_RTW_DYNAMIC_NDEV
#define CONFIG_RTW_DYNAMIC_NDEV
#endif
#ifndef RTW_SINGLE_WIPHY
#define RTW_SINGLE_WIPHY
#endif
#ifndef CONFIG_RADIO_WORK
#define CONFIG_RADIO_WORK
#endif
#ifndef RTW_DEDICATED_P2P_DEVICE
#define RTW_DEDICATED_P2P_DEVICE
#endif
#endif

#ifndef CONFIG_RADIO_WORK
#define RTW_ROCH_DURATION_ENLARGE
#define RTW_ROCH_BACK_OP
#endif

#if !defined(CONFIG_P2P) && RTW_P2P_GROUP_INTERFACE
#error "RTW_P2P_GROUP_INTERFACE can't be enabled when CONFIG_P2P is disabled\n"
#endif

#if !RTW_P2P_GROUP_INTERFACE && defined(RTW_DEDICATED_P2P_DEVICE)
#error "RTW_DEDICATED_P2P_DEVICE can't be enabled when RTW_P2P_GROUP_INTERFACE is disabled\n"
#endif

#if defined(RTW_DEDICATED_P2P_DEVICE) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
#error "RTW_DEDICATED_P2P_DEVICE can't be enabled when kernel < 3.7.0\n"
#endif

#ifdef CONFIG_RTW_MESH
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
#error "CONFIG_RTW_MESH can't be enabled when kernel < 3.10.0\n"
#endif
#endif

struct rtw_wdev_invit_info {
    u8 state; /* 0: req, 1: rep */
    u8 peer_mac[ETH_ALEN];
    u8 group_bssid[ETH_ALEN];
    u8 active;
    u8 token;
    u8 flags;
    u8 status;
    u8 req_op_ch;
    u8 rsp_op_ch;
};

#define rtw_wdev_invit_info_init(invit_info) \
    do { \
        (invit_info)->state = 0xff; \
        _rtw_memset((invit_info)->peer_mac, 0, ETH_ALEN); \
        _rtw_memset((invit_info)->group_bssid, 0, ETH_ALEN); \
        (invit_info)->active = 0xff; \
        (invit_info)->token = 0; \
        (invit_info)->flags = 0x00; \
        (invit_info)->status = 0xff; \
        (invit_info)->req_op_ch = 0; \
        (invit_info)->rsp_op_ch = 0; \
    } while (0)

struct rtw_wdev_nego_info {
    u8 state; /* 0: req, 1: rep, 2: conf */
    u8 iface_addr[ETH_ALEN];
    u8 peer_mac[ETH_ALEN];
    u8 peer_iface_addr[ETH_ALEN];
    u8 active;
    u8 token;
    u8 status;
    u8 req_intent;
    u8 req_op_ch;
    u8 req_listen_ch;
    u8 rsp_intent;
    u8 rsp_op_ch;
    u8 conf_op_ch;
};

#define rtw_wdev_nego_info_init(nego_info) \
    do { \
        (nego_info)->state = 0xff; \
        _rtw_memset((nego_info)->iface_addr, 0, ETH_ALEN); \
        _rtw_memset((nego_info)->peer_mac, 0, ETH_ALEN); \
        _rtw_memset((nego_info)->peer_iface_addr, 0, ETH_ALEN); \
        (nego_info)->active = 0xff; \
        (nego_info)->token = 0; \
        (nego_info)->status = 0xff; \
        (nego_info)->req_intent = 0xff; \
        (nego_info)->req_op_ch = 0; \
        (nego_info)->req_listen_ch = 0; \
        (nego_info)->rsp_intent = 0xff; \
        (nego_info)->rsp_op_ch = 0; \
        (nego_info)->conf_op_ch = 0; \
    } while (0)

struct rtw_wdev_priv {
    struct wireless_dev *rtw_wdev;

    _adapter *padapter;

#if RTW_CFG80211_BLOCK_STA_DISCON_EVENT
    u8 not_indic_disco;
#endif

    struct cfg80211_scan_request *scan_request;
    _lock scan_req_lock;

    struct cfg80211_connect_params *connect_req;
    _lock connect_req_lock;

    struct net_device *pmon_ndev; /* for monitor interface */
    char ifname_mon[IFNAMSIZ + 1]; /* interface name for monitor interface */

    u8 p2p_enabled;
    systime probe_resp_ie_update_time;

    u8 provdisc_req_issued;

    struct rtw_wdev_invit_info invit_info;
    struct rtw_wdev_nego_info nego_info;

    u8 bandroid_scan;
    bool block;
    bool block_scan;
    bool power_mgmt;

    /* report mgmt_frame registered */
    u16 report_mgmt;

    u8 is_mgmt_tx;
    u16 mgmt_tx_cookie;

    _mutex roch_mutex;

#ifdef CONFIG_CONCURRENT_MODE
    ATOMIC_T switch_ch_to;
#endif
};

bool rtw_cfg80211_is_connect_requested(_adapter *adapter);

#if RTW_CFG80211_BLOCK_STA_DISCON_EVENT
#define rtw_wdev_not_indic_disco(rtw_wdev_data) ((rtw_wdev_data)->not_indic_disco)
#define rtw_wdev_set_not_indic_disco(rtw_wdev_data, val) do { (rtw_wdev_data)->not_indic_disco = (val); } while (0)
#else
#define rtw_wdev_not_indic_disco(rtw_wdev_data) 0
#define rtw_wdev_set_not_indic_disco(rtw_wdev_data, val) do {} while (0)
#endif

#define rtw_wdev_free_connect_req(rtw_wdev_data) \
    do { \
        if ((rtw_wdev_data)->connect_req) { \
            rtw_mfree((u8 *)(rtw_wdev_data)->connect_req, sizeof(*(rtw_wdev_data)->connect_req)); \
            (rtw_wdev_data)->connect_req = NULL; \
        } \
    } while (0)

#define wdev_to_ndev(w) ((w)->netdev)
#define wdev_to_wiphy(w) ((w)->wiphy)
#define ndev_to_wdev(n) ((n)->ieee80211_ptr)

struct rtw_wiphy_data {
    struct dvobj_priv *dvobj;

#ifndef RTW_SINGLE_WIPHY
    _adapter *adapter;
#endif

#if defined(RTW_DEDICATED_P2P_DEVICE)
    struct wireless_dev *pd_wdev; /* P2P device wdev */
#endif
};

#define rtw_wiphy_priv(wiphy) ((struct rtw_wiphy_data *)wiphy_priv(wiphy))
#define wiphy_to_dvobj(wiphy) (((struct rtw_wiphy_data *)wiphy_priv(wiphy))->dvobj)
#ifdef RTW_SINGLE_WIPHY
#define wiphy_to_adapter(wiphy) (dvobj_get_primary_adapter(wiphy_to_dvobj(wiphy)))
#else
#define wiphy_to_adapter(wiphy) (((struct rtw_wiphy_data *)wiphy_priv(wiphy))->adapter)
#endif

#if defined(RTW_DEDICATED_P2P_DEVICE)
#define wiphy_to_pd_wdev(wiphy) (rtw_wiphy_priv(wiphy)->pd_wdev)
#else
#define wiphy_to_pd_wdev(wiphy) NULL
#endif

#define WIPHY_FMT "%s"
#define WIPHY_ARG(wiphy) wiphy_name(wiphy)
#define FUNC_WIPHY_FMT "%s("WIPHY_FMT")"
#define FUNC_WIPHY_ARG(wiphy) __func__, WIPHY_ARG(wiphy)

#define SET_CFG80211_REPORT_MGMT(w, t, v) (w->report_mgmt |= (v ? BIT(t >> 4) : 0))
#define GET_CFG80211_REPORT_MGMT(w, t) ((w->report_mgmt & BIT(t >> 4)) > 0)

struct wiphy *rtw_wiphy_alloc(_adapter *padapter, struct device *dev);
void rtw_wiphy_free(struct wiphy *wiphy);
int rtw_wiphy_register(struct wiphy *wiphy);
void rtw_wiphy_unregister(struct wiphy *wiphy);

int rtw_wdev_alloc(_adapter *padapter, struct wiphy *wiphy);
void rtw_wdev_free(struct wireless_dev *wdev);
void rtw_wdev_unregister(struct wireless_dev *wdev);

int rtw_cfg80211_ndev_res_alloc(_adapter *adapter);
void rtw_cfg80211_ndev_res_free(_adapter *adapter);
int rtw_cfg80211_ndev_res_register(_adapter *adapter);
void rtw_cfg80211_ndev_res_unregister(_adapter *adapter);

int rtw_cfg80211_dev_res_alloc(struct dvobj_priv *dvobj);
void rtw_cfg80211_dev_res_free(struct dvobj_priv *dvobj);
int rtw_cfg80211_dev_res_register(struct dvobj_priv *dvobj);
void rtw_cfg80211_dev_res_unregister(struct dvobj_priv *dvobj);

void rtw_cfg80211_init_wdev_data(_adapter *padapter);
void rtw_cfg80211_init_wiphy(_adapter *padapter);

void rtw_cfg80211_unlink_bss(_adapter *padapter, struct wlan_network *pnetwork);
void rtw_cfg80211_surveydone_event_callback(_adapter *padapter);
struct cfg80211_bss *rtw_cfg80211_inform_bss(_adapter *padapter, struct wlan_network *pnetwork);
int rtw_cfg80211_check_bss(_adapter *padapter);
void rtw_cfg80211_ibss_indicate_connect(_adapter *padapter);
void rtw_cfg80211_indicate_connect(_adapter *padapter);
void rtw_cfg80211_indicate_disconnect(_adapter *padapter, u16 reason, u8 locally_generated);
void rtw_cfg80211_indicate_scan_done(_adapter *adapter, bool aborted);
u32 rtw_cfg80211_wait_scan_req_empty(_adapter *adapter, u32 timeout_ms);

#ifdef CONFIG_CONCURRENT_MODE
u8 rtw_cfg80211_scan_via_buddy(_adapter *padapter, struct cfg80211_scan_request *request);
void rtw_cfg80211_indicate_scan_done_for_buddy(_adapter *padapter, bool bscan_aborted);
#endif

#ifdef CONFIG_AP_MODE
void rtw_cfg80211_indicate_sta_assoc(_adapter *padapter, u8 *pmgmt_frame, uint frame_len);
void rtw_cfg80211_indicate_sta_disassoc(_adapter *padapter, const u8 *da, unsigned short reason);
#endif /* CONFIG_AP_MODE */

#ifdef CONFIG_P2P
void rtw_cfg80211_set_is_roch(_adapter *adapter, bool val);
bool rtw_cfg80211_get_is_roch(_adapter *adapter);
bool rtw_cfg80211_is_ro_ch_once(_adapter *adapter);
void rtw_cfg80211_set_last_ro_ch_time(_adapter *adapter);
s32 rtw_cfg80211_get_last_ro_ch_passing_ms(_adapter *adapter);

int rtw_cfg80211_iface_has_p2p_group_cap(_adapter *adapter);
int rtw_cfg80211_is_p2p_scan(_adapter *adapter);
#if defined(RTW_DEDICATED_P2P_DEVICE)
int rtw_cfg80211_redirect_pd_wdev(struct wiphy *wiphy, u8 *ra, struct wireless_dev **wdev);
int rtw_cfg80211_is_scan_by_pd_wdev(_adapter *adapter);
int rtw_pd_iface_alloc(struct wiphy *wiphy, const char *name, struct wireless_dev **pd_wdev);
void rtw_pd_iface_free(struct wiphy *wiphy);
#endif
#endif /* CONFIG_P2P */

void rtw_cfg80211_set_is_mgmt_tx(_adapter *adapter, u8 val);
u8 rtw_cfg80211_get_is_mgmt_tx(_adapter *adapter);
u8 rtw_mgnt_tx_handler(_adapter *adapter, u8 *buf);

void rtw_cfg80211_issue_p2p_provision_request(_adapter *padapter, const u8 *buf, size_t len);

void rtw_cfg80211_rx_p2p_action_public(_adapter *padapter, union recv_frame *rframe);
void rtw_cfg80211_rx_action_p2p(_adapter *padapter, union recv_frame *rframe);
void rtw_cfg80211_rx_action(_adapter *adapter, union recv_frame *rframe, const char *msg);
void rtw_cfg80211_rx_mframe(_adapter *adapter, union recv_frame *rframe, const char *msg);
void rtw_cfg80211_rx_probe_request(_adapter *padapter, union recv_frame *rframe);

int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net, char *buf, int len, int type);

bool rtw_cfg80211_pwr_mgmt(_adapter *adapter);
#ifdef CONFIG_RTW_80211K
void rtw_cfg80211_rx_rrm_action(_adapter *adapter, union recv_frame *rframe);
#endif

#ifdef CONFIG_RFKILL_POLL
void rtw_cfg80211_init_rfkill(struct wiphy *wiphy);
void rtw_cfg80211_deinit_rfkill(struct wiphy *wiphy);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(COMPAT_KERNEL_RELEASE)
#define rtw_cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt(wdev_to_ndev(wdev), freq, buf, len, gfp)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
#define rtw_cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt(wdev_to_ndev(wdev), freq, sig_dbm, buf, len, gfp)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
#define rtw_cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
#define rtw_cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, 0, gfp)
#else
#define rtw_cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, 0)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(COMPAT_KERNEL_RELEASE)
#define rtw_cfg80211_send_rx_assoc(adapter, bss, buf, len) cfg80211_send_rx_assoc((adapter)->pnetdev, buf, len)
#else
#define rtw_cfg80211_send_rx_assoc(adapter, bss, buf, len) cfg80211_send_rx_assoc((adapter)->pnetdev, bss, buf, len)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
#define rtw_cfg80211_mgmt_tx_status(wdev, cookie, buf, len, ack, gfp) cfg80211_mgmt_tx_status(wdev_to_ndev(wdev), cookie, buf, len, ack, gfp)
#else
#define rtw_cfg80211_mgmt_tx_status(wdev, cookie, buf, len, ack, gfp) cfg80211_mgmt_tx_status(wdev, cookie, buf, len, ack, gfp)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
#define rtw_cfg80211_ready_on_channel(wdev, cookie, chan, channel_type, duration, gfp) cfg80211_ready_on_channel(wdev_to_ndev(wdev), cookie, chan, channel_type, duration, gfp)
#define rtw_cfg80211_remain_on_channel_expired(wdev, cookie, chan, chan_type, gfp) cfg80211_remain_on_channel_expired(wdev_to_ndev(wdev), cookie, chan, chan_type, gfp)
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
#define rtw_cfg80211_ready_on_channel(wdev, cookie, chan, channel_type, duration, gfp) cfg80211_ready_on_channel(wdev, cookie, chan, channel_type, duration, gfp)
#define rtw_cfg80211_remain_on_channel_expired(wdev, cookie, chan, chan_type, gfp) cfg80211_remain_on_channel_expired(wdev, cookie, chan, chan_type, gfp)
#else
#define rtw_cfg80211_ready_on_channel(wdev, cookie, chan, channel_type, duration, gfp) cfg80211_ready_on_channel(wdev, cookie, chan, duration, gfp)
#define rtw_cfg80211_remain_on_channel_expired(wdev, cookie, chan, chan_type, gfp) cfg80211_remain_on_channel_expired(wdev, cookie, chan, gfp)
#endif

#define rtw_cfg80211_connect_result(wdev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, status, gfp) cfg80211_connect_result(wdev_to_ndev(wdev), bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, status, gfp)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
#define rtw_cfg80211_disconnected(wdev, reason, ie, ie_len, locally_generated, gfp) cfg80211_disconnected(wdev_to_ndev(wdev), reason, ie, ie_len, gfp)
#else
#define rtw_cfg80211_disconnected(wdev, reason, ie, ie_len, locally_generated, gfp) cfg80211_disconnected(wdev_to_ndev(wdev), reason, ie, ie_len, locally_generated, gfp)
#endif

#ifdef CONFIG_RTW_80211R
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
#define rtw_cfg80211_ft_event(adapter, parm) cfg80211_ft_event((adapter)->pnetdev, parm)
#else
#error "Cannot support FT for KERNEL_VERSION < 3.10\n"
#endif
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
#define rtw_cfg80211_notify_new_peer_candidate(wdev, addr, ie, ie_len, gfp) cfg80211_notify_new_peer_candidate(wdev_to_ndev(wdev), addr, ie, ie_len, gfp)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
u8 rtw_cfg80211_ch_switch_notify(_adapter *adapter, u8 ch, u8 bw, u8 offset, u8 ht);
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
#define NL80211_BAND_2GHZ IEEE80211_BAND_2GHZ
#define NL80211_BAND_5GHZ IEEE80211_BAND_5GHZ
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
#define NL80211_BAND_60GHZ IEEE80211_BAND_60GHZ
#endif
#define NUM_NL80211_BANDS IEEE80211_NUM_BANDS
#endif

#define rtw_band_to_nl80211_band(band) \
    (band == BAND_ON_2_4G) ? NL80211_BAND_2GHZ : \
    (band == BAND_ON_5G) ? NL80211_BAND_5GHZ : NUM_NL80211_BANDS

#include "rtw_cfgvendor.h"

#endif /* __IOCTL_CFG80211_H__ */
os_dep/linux/ioctl_linux.c (new file, 12867 lines; diff suppressed because it is too large)
os_dep/linux/ioctl_mp.c (new file, 2634 lines; diff suppressed because it is too large)
os_dep/linux/mlme_linux.c (new file, 430 lines)
@@ -0,0 +1,430 @@
/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/

#define _MLME_OSDEP_C_

#include <drv_types.h>

#ifdef RTK_DMP_PLATFORM
void Linkup_workitem_callback(struct work_struct *work)
{
    struct mlme_priv *pmlmepriv = container_of(work, struct mlme_priv, Linkup_workitem);
    _adapter *padapter = container_of(pmlmepriv, _adapter, mlmepriv);

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
    kobject_uevent(&padapter->pnetdev->dev.kobj, KOBJ_LINKUP);
#else
    kobject_hotplug(&padapter->pnetdev->class_dev.kobj, KOBJ_LINKUP);
#endif
}

void Linkdown_workitem_callback(struct work_struct *work)
{
    struct mlme_priv *pmlmepriv = container_of(work, struct mlme_priv, Linkdown_workitem);
    _adapter *padapter = container_of(pmlmepriv, _adapter, mlmepriv);

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12))
    kobject_uevent(&padapter->pnetdev->dev.kobj, KOBJ_LINKDOWN);
#else
    kobject_hotplug(&padapter->pnetdev->class_dev.kobj, KOBJ_LINKDOWN);
#endif
}
#endif

extern void rtw_indicate_wx_assoc_event(_adapter *padapter);
extern void rtw_indicate_wx_disassoc_event(_adapter *padapter);

void rtw_os_indicate_connect(_adapter *adapter)
{
    struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);

#ifdef CONFIG_IOCTL_CFG80211
    if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE) ||
        (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE))
        rtw_cfg80211_ibss_indicate_connect(adapter);
    else
        rtw_cfg80211_indicate_connect(adapter);
#endif /* CONFIG_IOCTL_CFG80211 */

    rtw_indicate_wx_assoc_event(adapter);
    rtw_netif_carrier_on(adapter->pnetdev);

    if (adapter->pid[2] != 0)
        rtw_signal_process(adapter->pid[2], SIGALRM);

#ifdef RTK_DMP_PLATFORM
    _set_workitem(&adapter->mlmepriv.Linkup_workitem);
#endif
}

extern void indicate_wx_scan_complete_event(_adapter *padapter);
void rtw_os_indicate_scan_done(_adapter *padapter, bool aborted)
{
#ifdef CONFIG_IOCTL_CFG80211
    rtw_cfg80211_indicate_scan_done(padapter, aborted);
#endif
    indicate_wx_scan_complete_event(padapter);
}

static RT_PMKID_LIST backupPMKIDList[NUM_PMKID_CACHE];
void rtw_reset_securitypriv(_adapter *adapter)
{
    u8 backupPMKIDIndex = 0;
    u8 backupTKIPCountermeasure = 0x00;
    u32 backupTKIPcountermeasure_time = 0;
    /* added for CONFIG_IEEE80211W; non-11w builds can also use this */
    _irqL irqL;
    struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;

    _enter_critical_bh(&adapter->security_key_mutex, &irqL);

    if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802.1x */
        /* Added by Albert 2009/02/18 */
        /* We have to back up the PMK information for the WiFi PMK Caching test item. */
        /* Also back up the btkip_countermeasure information. */
        /* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */

        _rtw_memset(&backupPMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);

        _rtw_memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
        backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
        backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure;
        backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time;
        _rtw_memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));

        /* Added by Albert 2009/02/18 */
        /* Restore the PMK information to the securitypriv structure for the following connection. */
        _rtw_memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
        adapter->securitypriv.PMKIDIndex = backupPMKIDIndex;
        adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure;
        adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time;

        adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
        adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;

    } else { /* reset values in securitypriv */
        /* if(adapter->mlmepriv.fw_state & WIFI_STATION_STATE) */
        /* { */
        struct security_priv *psec_priv = &adapter->securitypriv;

        psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
        psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
        psec_priv->dot11PrivacyKeyIndex = 0;

        psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
        psec_priv->dot118021XGrpKeyid = 1;

        psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
        psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
        /* } */
    }
    /* added for CONFIG_IEEE80211W; non-11w builds can also use this */
    _exit_critical_bh(&adapter->security_key_mutex, &irqL);

    RTW_INFO(FUNC_ADPT_FMT" - End to Disconnect\n", FUNC_ADPT_ARG(adapter));
}

void rtw_os_indicate_disconnect(_adapter *adapter, u16 reason, u8 locally_generated)
{
    /* RT_PMKID_LIST backupPMKIDList[NUM_PMKID_CACHE]; */

    rtw_netif_carrier_off(adapter->pnetdev); /* Do it first for the tx broadcast pkt after disconnection issue! */

#ifdef CONFIG_IOCTL_CFG80211
    rtw_cfg80211_indicate_disconnect(adapter, reason, locally_generated);
#endif /* CONFIG_IOCTL_CFG80211 */

    rtw_indicate_wx_disassoc_event(adapter);

#ifdef RTK_DMP_PLATFORM
    _set_workitem(&adapter->mlmepriv.Linkdown_workitem);
#endif
    /* modified for CONFIG_IEEE80211W; non-11w builds can also use the same command */
    rtw_reset_securitypriv_cmd(adapter);
}

void rtw_report_sec_ie(_adapter *adapter, u8 authmode, u8 *sec_ie)
{
    uint len;
    u8 *buff, *p, i;
    union iwreq_data wrqu;

    buff = NULL;
    if (authmode == _WPA_IE_ID_) {

        buff = rtw_zmalloc(IW_CUSTOM_MAX);
        if (NULL == buff) {
            RTW_INFO(FUNC_ADPT_FMT ": alloc memory FAIL!!\n",
                 FUNC_ADPT_ARG(adapter));
            return;
        }
        p = buff;

        p += sprintf(p, "ASSOCINFO(ReqIEs=");

        len = sec_ie[1] + 2;
        len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;

        for (i = 0; i < len; i++)
            p += sprintf(p, "%02x", sec_ie[i]);

        p += sprintf(p, ")");

        _rtw_memset(&wrqu, 0, sizeof(wrqu));

        wrqu.data.length = p - buff;

        wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ? wrqu.data.length : IW_CUSTOM_MAX;

#ifndef CONFIG_IOCTL_CFG80211
        wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
#endif

        rtw_mfree(buff, IW_CUSTOM_MAX);
    }
}

#ifdef CONFIG_AP_MODE

void rtw_indicate_sta_assoc_event(_adapter *padapter, struct sta_info *psta)
{
    union iwreq_data wrqu;
    struct sta_priv *pstapriv = &padapter->stapriv;

    if (psta == NULL)
        return;

    if (psta->cmn.aid > pstapriv->max_aid)
        return;

    if (pstapriv->sta_aid[psta->cmn.aid - 1] != psta)
        return;

    wrqu.addr.sa_family = ARPHRD_ETHER;

    _rtw_memcpy(wrqu.addr.sa_data, psta->cmn.mac_addr, ETH_ALEN);

    RTW_INFO("+rtw_indicate_sta_assoc_event\n");

#ifndef CONFIG_IOCTL_CFG80211
    wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
#endif
}

void rtw_indicate_sta_disassoc_event(_adapter *padapter, struct sta_info *psta)
{
    union iwreq_data wrqu;
    struct sta_priv *pstapriv = &padapter->stapriv;

    if (psta == NULL)
        return;

    if (psta->cmn.aid > pstapriv->max_aid)
        return;

    if (pstapriv->sta_aid[psta->cmn.aid - 1] != psta)
        return;

    wrqu.addr.sa_family = ARPHRD_ETHER;

    _rtw_memcpy(wrqu.addr.sa_data, psta->cmn.mac_addr, ETH_ALEN);

    RTW_INFO("+rtw_indicate_sta_disassoc_event\n");

#ifndef CONFIG_IOCTL_CFG80211
    wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
#endif
}

#ifdef CONFIG_HOSTAPD_MLME

static int mgnt_xmit_entry(struct sk_buff *skb, struct net_device *pnetdev)
{
    struct hostapd_priv *phostapdpriv = rtw_netdev_priv(pnetdev);
    _adapter *padapter = (_adapter *)phostapdpriv->padapter;

    /* RTW_INFO("%s\n", __FUNCTION__); */

    return rtw_hal_hostap_mgnt_xmit_entry(padapter, skb);
}

static int mgnt_netdev_open(struct net_device *pnetdev)
{
    struct hostapd_priv *phostapdpriv = rtw_netdev_priv(pnetdev);

    RTW_INFO("mgnt_netdev_open: MAC Address:" MAC_FMT "\n", MAC_ARG(pnetdev->dev_addr));

    init_usb_anchor(&phostapdpriv->anchored);

    rtw_netif_wake_queue(pnetdev);

    rtw_netif_carrier_on(pnetdev);

    /* rtw_write16(phostapdpriv->padapter, 0x0116, 0x0100); */ /* only excluding beacon */

    return 0;
}

static int mgnt_netdev_close(struct net_device *pnetdev)
{
    struct hostapd_priv *phostapdpriv = rtw_netdev_priv(pnetdev);

    RTW_INFO("%s\n", __FUNCTION__);

    usb_kill_anchored_urbs(&phostapdpriv->anchored);

    rtw_netif_carrier_off(pnetdev);

    rtw_netif_stop_queue(pnetdev);

    /* rtw_write16(phostapdpriv->padapter, 0x0116, 0x3f3f); */

    return 0;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
static const struct net_device_ops rtl871x_mgnt_netdev_ops = {
    .ndo_open = mgnt_netdev_open,
    .ndo_stop = mgnt_netdev_close,
    .ndo_start_xmit = mgnt_xmit_entry,
#if 0
    .ndo_set_mac_address = r871x_net_set_mac_address,
    .ndo_get_stats = r871x_net_get_stats,
    .ndo_do_ioctl = r871x_mp_ioctl,
#endif
};
#endif

int hostapd_mode_init(_adapter *padapter)
{
    unsigned char mac[ETH_ALEN];
    struct hostapd_priv *phostapdpriv;
    struct net_device *pnetdev;

    pnetdev = rtw_alloc_etherdev(sizeof(struct hostapd_priv));
    if (!pnetdev)
        return -ENOMEM;

    /* SET_MODULE_OWNER(pnetdev); */
    ether_setup(pnetdev);

    /* pnetdev->type = ARPHRD_IEEE80211; */

    phostapdpriv = rtw_netdev_priv(pnetdev);
    phostapdpriv->pmgnt_netdev = pnetdev;
    phostapdpriv->padapter = padapter;
    padapter->phostapdpriv = phostapdpriv;

    /* pnetdev->init = NULL; */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))

    RTW_INFO("register rtl871x_mgnt_netdev_ops to netdev_ops\n");

    pnetdev->netdev_ops = &rtl871x_mgnt_netdev_ops;

#else

    pnetdev->open = mgnt_netdev_open;
    pnetdev->stop = mgnt_netdev_close;
    pnetdev->hard_start_xmit = mgnt_xmit_entry;

    /* pnetdev->set_mac_address = r871x_net_set_mac_address; */
    /* pnetdev->get_stats = r871x_net_get_stats; */
    /* pnetdev->do_ioctl = r871x_mp_ioctl; */

#endif

    pnetdev->watchdog_timeo = HZ; /* 1 second timeout */

    /* pnetdev->wireless_handlers = NULL; */

#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
    pnetdev->features |= NETIF_F_IP_CSUM;
#endif

    if (dev_alloc_name(pnetdev, "mgnt.wlan%d") < 0)
        RTW_INFO("hostapd_mode_init(): dev_alloc_name, fail!\n");

    /* SET_NETDEV_DEV(pnetdev, pintfpriv->udev); */

    mac[0] = 0x00;
    mac[1] = 0xe0;
    mac[2] = 0x4c;
    mac[3] = 0x87;
    mac[4] = 0x11;
    mac[5] = 0x12;

    _rtw_memcpy(pnetdev->dev_addr, mac, ETH_ALEN);

    rtw_netif_carrier_off(pnetdev);

    /* Tell the network stack we exist */
    if (register_netdev(pnetdev) != 0) {
        RTW_INFO("hostapd_mode_init(): register_netdev fail!\n");

        if (pnetdev)
            rtw_free_netdev(pnetdev);
    }

    return 0;
}

void hostapd_mode_unload(_adapter *padapter)
{
    struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;
    struct net_device *pnetdev = phostapdpriv->pmgnt_netdev;

    unregister_netdev(pnetdev);
    rtw_free_netdev(pnetdev);
}

#endif
#endif
os_dep/linux/os_intfs.c (new file, 4882 lines; diff suppressed because it is too large)
os_dep/linux/recv_linux.c (new file, 732 lines)
@@ -0,0 +1,732 @@
/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _RECV_OSDEP_C_

#include <drv_types.h>

int rtw_os_recvframe_duplicate_skb(_adapter *padapter, union recv_frame *pcloneframe, _pkt *pskb)
{
    int res = _SUCCESS;
    _pkt *pkt_copy = NULL;
    struct rx_pkt_attrib *pattrib = &pcloneframe->u.hdr.attrib;

    if (pskb == NULL) {
        RTW_INFO("%s [WARN] skb == NULL, drop frag frame\n", __func__);
        return _FAIL;
    }
#if 1
    pkt_copy = rtw_skb_copy(pskb);

    if (pkt_copy == NULL) {
        RTW_INFO("%s [WARN] rtw_skb_copy fail , drop frag frame\n", __func__);
        return _FAIL;
    }
#else
    pkt_copy = rtw_skb_clone(pskb);

    if (pkt_copy == NULL) {
        RTW_INFO("%s [WARN] rtw_skb_clone fail , drop frag frame\n", __func__);
        return _FAIL;
    }
#endif
    pkt_copy->dev = padapter->pnetdev;

    pcloneframe->u.hdr.pkt = pkt_copy;
    pcloneframe->u.hdr.rx_head = pkt_copy->head;
    pcloneframe->u.hdr.rx_data = pkt_copy->data;
    pcloneframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
    pcloneframe->u.hdr.rx_tail = skb_tail_pointer(pkt_copy);
    pcloneframe->u.hdr.len = pkt_copy->len;

    return res;
}

int rtw_os_alloc_recvframe(_adapter *padapter, union recv_frame *precvframe, u8 *pdata, _pkt *pskb)
{
    int res = _SUCCESS;
    u8 shift_sz = 0;
    u32 skb_len, alloc_sz;
    _pkt *pkt_copy = NULL;
    struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;

    if (pdata == NULL) {
        precvframe->u.hdr.pkt = NULL;
        res = _FAIL;
        return res;
    }

    /* Modified by Albert 20101213 */
    /* For 8-byte IP header alignment. */
    shift_sz = pattrib->qos ? 6 : 0; /* QoS data, wireless LAN header length is 26 */

    skb_len = pattrib->pkt_len;

    /* For the first fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet. */
    /* modify alloc_sz for receiving CRC error packets, by thomas 2011-06-02 */
    if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
        /* alloc_sz = 1664; */ /* 1664 is 128 alignment. */
        alloc_sz = (skb_len <= 1650) ? 1664 : (skb_len + 14);
    } else {
        alloc_sz = skb_len;
        /* 6 is for IP header 8-byte alignment in the QoS packet case. */
        /* 8 is for skb->data 4-byte alignment. */
        alloc_sz += 14;
    }

    pkt_copy = rtw_skb_alloc(alloc_sz);

    if (pkt_copy) {
        pkt_copy->dev = padapter->pnetdev;
        pkt_copy->len = skb_len;
        precvframe->u.hdr.pkt = pkt_copy;
        precvframe->u.hdr.rx_head = pkt_copy->head;
        precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
        skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7)); /* force pkt_copy->data to an 8-byte aligned address */
        skb_reserve(pkt_copy, shift_sz); /* force ip_hdr to an 8-byte aligned address according to shift_sz */
        _rtw_memcpy(pkt_copy->data, pdata, skb_len);
        precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
    } else {
#if 0
        {
            rtw_free_recvframe(precvframe_if2, &precvpriv->free_recv_queue);
            rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue);

            /* Failure to allocate an skb is serious and may never be recovered;
               once bDriverStopped is enabled, this task should be stopped. */
            if (!rtw_is_drv_stopped(secondary_padapter))
#ifdef PLATFORM_LINUX
                tasklet_schedule(&precvpriv->recv_tasklet);
#endif
            return ret;
        }
#endif

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
        RTW_INFO("%s:can not allocate memory for skb copy\n", __func__);

        precvframe->u.hdr.pkt = NULL;

        /* rtw_free_recvframe(precvframe, pfree_recv_queue); */
        /* exit_rtw_os_recv_resource_alloc; */

        res = _FAIL;
#else
        if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
            RTW_INFO("%s: alloc_skb fail , drop frag frame\n", __func__);
            /* rtw_free_recvframe(precvframe, pfree_recv_queue); */
            res = _FAIL;
            goto exit_rtw_os_recv_resource_alloc;
        }

        if (pskb == NULL) {
            res = _FAIL;
            goto exit_rtw_os_recv_resource_alloc;
        }

        precvframe->u.hdr.pkt = rtw_skb_clone(pskb);
        if (precvframe->u.hdr.pkt) {
            precvframe->u.hdr.pkt->dev = padapter->pnetdev;
            precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pdata;
            precvframe->u.hdr.rx_end = pdata + alloc_sz;
        } else {
            RTW_INFO("%s: rtw_skb_clone fail\n", __func__);
            /* rtw_free_recvframe(precvframe, pfree_recv_queue); */
            /* exit_rtw_os_recv_resource_alloc; */
            res = _FAIL;
        }
#endif
    }

exit_rtw_os_recv_resource_alloc:

    return res;
}

void rtw_os_free_recvframe(union recv_frame *precvframe)
{
    if (precvframe->u.hdr.pkt) {
        rtw_os_pkt_free(precvframe->u.hdr.pkt);
        precvframe->u.hdr.pkt = NULL;
    }
}

/* init os related resource in struct recv_priv */
int rtw_os_recv_resource_init(struct recv_priv *precvpriv, _adapter *padapter)
{
    int res = _SUCCESS;

#ifdef CONFIG_RTW_NAPI
    skb_queue_head_init(&precvpriv->rx_napi_skb_queue);
#endif /* CONFIG_RTW_NAPI */

    return res;
}

/* alloc os related resource in union recv_frame */
int rtw_os_recv_resource_alloc(_adapter *padapter, union recv_frame *precvframe)
{
    int res = _SUCCESS;

    precvframe->u.hdr.pkt = NULL;

    return res;
}

/* free os related resource in union recv_frame */
void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
{
    sint i;
    union recv_frame *precvframe;
    precvframe = (union recv_frame *) precvpriv->precv_frame_buf;

#ifdef CONFIG_RTW_NAPI
    if (skb_queue_len(&precvpriv->rx_napi_skb_queue))
        RTW_WARN("rx_napi_skb_queue not empty\n");
    rtw_skb_queue_purge(&precvpriv->rx_napi_skb_queue);
#endif /* CONFIG_RTW_NAPI */

    for (i = 0; i < NR_RECVFRAME; i++) {
        rtw_os_free_recvframe(precvframe);
        precvframe++;
    }
}

/* alloc os related resource in struct recv_buf */
int rtw_os_recvbuf_resource_alloc(_adapter *padapter, struct recv_buf *precvbuf)
{
    int res = _SUCCESS;

#ifdef CONFIG_USB_HCI
    struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
    struct usb_device *pusbd = pdvobjpriv->pusbdev;

    precvbuf->irp_pending = _FALSE;
    precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
    if (precvbuf->purb == NULL)
        res = _FAIL;

    precvbuf->pskb = NULL;

    precvbuf->pallocated_buf = precvbuf->pbuf = NULL;

    precvbuf->pdata = precvbuf->phead = precvbuf->ptail = precvbuf->pend = NULL;

    precvbuf->transfer_len = 0;

    precvbuf->len = 0;

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
    precvbuf->pallocated_buf = rtw_usb_buffer_alloc(pusbd, (size_t)precvbuf->alloc_sz, &precvbuf->dma_transfer_addr);
    precvbuf->pbuf = precvbuf->pallocated_buf;
    if (precvbuf->pallocated_buf == NULL)
        return _FAIL;
#endif /* CONFIG_USE_USB_BUFFER_ALLOC_RX */

#endif /* CONFIG_USB_HCI */

    return res;
}

/* free os related resource in struct recv_buf */
int rtw_os_recvbuf_resource_free(_adapter *padapter, struct recv_buf *precvbuf)
{
    int ret = _SUCCESS;

#ifdef CONFIG_USB_HCI

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX

    struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
    struct usb_device *pusbd = pdvobjpriv->pusbdev;

    rtw_usb_buffer_free(pusbd, (size_t)precvbuf->alloc_sz, precvbuf->pallocated_buf, precvbuf->dma_transfer_addr);
    precvbuf->pallocated_buf = NULL;
    precvbuf->dma_transfer_addr = 0;

#endif /* CONFIG_USE_USB_BUFFER_ALLOC_RX */

    if (precvbuf->purb) {
        /* usb_kill_urb(precvbuf->purb); */
        usb_free_urb(precvbuf->purb);
    }

#endif /* CONFIG_USB_HCI */

    if (precvbuf->pskb) {
#ifdef CONFIG_PREALLOC_RX_SKB_BUFFER
        if (rtw_free_skb_premem(precvbuf->pskb) != 0)
#endif
            rtw_skb_free(precvbuf->pskb);
    }
    return ret;
}

_pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, const u8 *da, const u8 *sa, u8 *msdu, u16 msdu_len)
{
    u16 eth_type;
    u8 *data_ptr;
    _pkt *sub_skb;
    struct rx_pkt_attrib *pattrib;

    pattrib = &prframe->u.hdr.attrib;

#ifdef CONFIG_SKB_COPY
    sub_skb = rtw_skb_alloc(msdu_len + 14);
    if (sub_skb) {
        skb_reserve(sub_skb, 14);
        data_ptr = (u8 *)skb_put(sub_skb, msdu_len);
        _rtw_memcpy(data_ptr, msdu, msdu_len);
    } else
#endif /* CONFIG_SKB_COPY */
    {
        sub_skb = rtw_skb_clone(prframe->u.hdr.pkt);
        if (sub_skb) {
            sub_skb->data = msdu;
            sub_skb->len = msdu_len;
            skb_set_tail_pointer(sub_skb, msdu_len);
        } else {
            RTW_INFO("%s(): rtw_skb_clone() Fail!!!\n", __func__);
            return NULL;
        }
    }

    eth_type = RTW_GET_BE16(&sub_skb->data[6]);

    if (sub_skb->len >= 8
        && ((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE)
             && eth_type != ETH_P_AARP && eth_type != ETH_P_IPX)
            || _rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))
       ) {
        /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
        skb_pull(sub_skb, SNAP_SIZE);
        _rtw_memcpy(skb_push(sub_skb, ETH_ALEN), sa, ETH_ALEN);
        _rtw_memcpy(skb_push(sub_skb, ETH_ALEN), da, ETH_ALEN);
    } else {
        /* Leave Ethernet header part of hdr and full payload */
        u16 len;

        len = htons(sub_skb->len);
        _rtw_memcpy(skb_push(sub_skb, 2), &len, 2);
        _rtw_memcpy(skb_push(sub_skb, ETH_ALEN), sa, ETH_ALEN);
        _rtw_memcpy(skb_push(sub_skb, ETH_ALEN), da, ETH_ALEN);
    }

    return sub_skb;
}

#ifdef CONFIG_RTW_NAPI
static int napi_recv(_adapter *padapter, int budget)
{
    _pkt *pskb;
    struct recv_priv *precvpriv = &padapter->recvpriv;
    int work_done = 0;
    struct registry_priv *pregistrypriv = &padapter->registrypriv;
    u8 rx_ok;

    while ((work_done < budget) &&
           (!skb_queue_empty(&precvpriv->rx_napi_skb_queue))) {
        pskb = skb_dequeue(&precvpriv->rx_napi_skb_queue);
        if (!pskb)
            break;

        rx_ok = _FALSE;

#ifdef CONFIG_RTW_GRO
        if (pregistrypriv->en_gro) {
            if (rtw_napi_gro_receive(&padapter->napi, pskb) != GRO_DROP)
                rx_ok = _TRUE;
            goto next;
        }
#endif /* CONFIG_RTW_GRO */

        if (rtw_netif_receive_skb(padapter->pnetdev, pskb) == NET_RX_SUCCESS)
            rx_ok = _TRUE;

next:
        if (rx_ok == _TRUE) {
            work_done++;
            DBG_COUNTER(padapter->rx_logs.os_netif_ok);
        } else {
            DBG_COUNTER(padapter->rx_logs.os_netif_err);
        }
    }

    return work_done;
}

int rtw_recv_napi_poll(struct napi_struct *napi, int budget)
{
    _adapter *padapter = container_of(napi, _adapter, napi);
    int work_done = 0;
    struct recv_priv *precvpriv = &padapter->recvpriv;

    work_done = napi_recv(padapter, budget);
    if (work_done < budget) {
        napi_complete(napi);
        if (!skb_queue_empty(&precvpriv->rx_napi_skb_queue))
            napi_schedule(napi);
    }

    return work_done;
}

#ifdef CONFIG_RTW_NAPI_DYNAMIC
void dynamic_napi_th_chk(_adapter *adapter)
{
    if (adapter->registrypriv.en_napi) {
        struct dvobj_priv *dvobj;
        struct registry_priv *registry;

        dvobj = adapter_to_dvobj(adapter);
        registry = &adapter->registrypriv;
        if (dvobj->traffic_stat.cur_rx_tp > registry->napi_threshold)
            dvobj->en_napi_dynamic = 1;
        else
            dvobj->en_napi_dynamic = 0;
    }
}
#endif /* CONFIG_RTW_NAPI_DYNAMIC */
#endif /* CONFIG_RTW_NAPI */

void rtw_os_recv_indicate_pkt(_adapter *padapter, _pkt *pkt, union recv_frame *rframe)
|
||||
{
|
||||
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
|
||||
struct recv_priv *precvpriv = &(padapter->recvpriv);
|
||||
struct registry_priv *pregistrypriv = &padapter->registrypriv;
|
||||
#ifdef CONFIG_BR_EXT
|
||||
void *br_port = NULL;
|
||||
#endif
|
||||
int ret;
|
||||
|
||||
/* Indicate the packets to upper layer */
|
||||
if (pkt) {
|
||||
struct ethhdr *ehdr = (struct ethhdr *)pkt->data;
|
||||
|
||||
DBG_COUNTER(padapter->rx_logs.os_indicate);
|
||||
|
||||
if (MLME_IS_AP(padapter)) {
|
||||
_pkt *pskb2 = NULL;
|
||||
struct sta_info *psta = NULL;
|
||||
struct sta_priv *pstapriv = &padapter->stapriv;
|
||||
int bmcast = IS_MCAST(ehdr->h_dest);
|
||||
|
||||
/* RTW_INFO("bmcast=%d\n", bmcast); */
|
||||
|
||||
if (_rtw_memcmp(ehdr->h_dest, adapter_mac_addr(padapter), ETH_ALEN) == _FALSE) {
|
||||
/* RTW_INFO("not ap psta=%p, addr=%pM\n", psta, ehdr->h_dest); */
|
||||
|
||||
if (bmcast) {
|
||||
psta = rtw_get_bcmc_stainfo(padapter);
|
||||
pskb2 = rtw_skb_clone(pkt);
|
||||
} else
|
||||
psta = rtw_get_stainfo(pstapriv, ehdr->h_dest);
|
||||
|
||||
if (psta) {
|
||||
struct net_device *pnetdev = (struct net_device *)padapter->pnetdev;
|
||||
|
||||
/* RTW_INFO("directly forwarding to the rtw_xmit_entry\n"); */
|
||||
|
||||
/* skb->ip_summed = CHECKSUM_NONE; */
|
||||
pkt->dev = pnetdev;
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
|
||||
skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
|
||||
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) */
|
||||
|
||||
_rtw_xmit_entry(pkt, pnetdev);
|
||||
|
||||
if (bmcast && (pskb2 != NULL)) {
|
||||
pkt = pskb2;
|
||||
DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
|
||||
} else {
|
||||
DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else { /* to APself */
|
||||
/* RTW_INFO("to APSelf\n"); */
|
||||
DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BR_EXT
|
||||
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
|
||||
/* Insert NAT2.5 RX here! */
|
||||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
|
||||
br_port = padapter->pnetdev->br_port;
|
||||
#else
|
||||
rcu_read_lock();
|
||||
br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
|
||||
rcu_read_unlock();
|
||||
#endif
|
||||
|
||||
if (br_port) {
|
||||
int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
|
||||
|
||||
if (nat25_handle_frame(padapter, pkt) == -1) {
|
||||
/* priv->ext_stats.rx_data_drops++; */
|
||||
/* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */
|
||||
/* return FAIL; */
|
||||
|
||||
#if 1
|
||||
/* bypass this frame to upper layer!! */
|
||||
#else
|
||||
rtw_skb_free(sub_skb);
|
||||
continue;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BR_EXT */
|
||||
|
||||
/* After eth_type_trans processing, pkt->data points past the Ethernet header to the IP header */
|
||||
pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
|
||||
pkt->dev = padapter->pnetdev;
|
||||
pkt->ip_summed = CHECKSUM_NONE; /* CONFIG_TCP_CSUM_OFFLOAD_RX */
|
||||
|
||||
#ifdef CONFIG_RTW_NAPI
|
||||
#ifdef CONFIG_RTW_NAPI_DYNAMIC
|
||||
if (!skb_queue_empty(&precvpriv->rx_napi_skb_queue)
|
||||
&& !adapter_to_dvobj(padapter)->en_napi_dynamic
|
||||
)
|
||||
napi_recv(padapter, RTL_NAPI_WEIGHT);
|
||||
#endif
|
||||
|
||||
if (pregistrypriv->en_napi
|
||||
#ifdef CONFIG_RTW_NAPI_DYNAMIC
|
||||
&& adapter_to_dvobj(padapter)->en_napi_dynamic
|
||||
#endif
|
||||
) {
|
||||
skb_queue_tail(&precvpriv->rx_napi_skb_queue, pkt);
|
||||
#ifndef CONFIG_RTW_NAPI_V2
|
||||
napi_schedule(&padapter->napi);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
#endif /* CONFIG_RTW_NAPI */
|
||||
|
||||
ret = rtw_netif_rx(padapter->pnetdev, pkt);
|
||||
if (ret == NET_RX_SUCCESS)
|
||||
DBG_COUNTER(padapter->rx_logs.os_netif_ok);
|
||||
else
|
||||
DBG_COUNTER(padapter->rx_logs.os_netif_err);
|
||||
}
|
||||
}
|
||||
|
||||
void rtw_handle_tkip_mic_err(_adapter *padapter, struct sta_info *sta, u8 bgroup)
|
||||
{
|
||||
#ifdef CONFIG_IOCTL_CFG80211
|
||||
enum nl80211_key_type key_type = 0;
|
||||
#endif
|
||||
union iwreq_data wrqu;
|
||||
struct iw_michaelmicfailure ev;
|
||||
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
|
||||
struct security_priv *psecuritypriv = &padapter->securitypriv;
|
||||
systime cur_time = 0;
|
||||
|
||||
if (psecuritypriv->last_mic_err_time == 0)
|
||||
psecuritypriv->last_mic_err_time = rtw_get_current_time();
|
||||
else {
|
||||
cur_time = rtw_get_current_time();
|
||||
|
||||
if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
|
||||
psecuritypriv->btkip_countermeasure = _TRUE;
|
||||
psecuritypriv->last_mic_err_time = 0;
|
||||
psecuritypriv->btkip_countermeasure_time = cur_time;
|
||||
} else
|
||||
psecuritypriv->last_mic_err_time = rtw_get_current_time();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOCTL_CFG80211
|
||||
if (bgroup)
|
||||
key_type |= NL80211_KEYTYPE_GROUP;
|
||||
else
|
||||
key_type |= NL80211_KEYTYPE_PAIRWISE;
|
||||
|
||||
cfg80211_michael_mic_failure(padapter->pnetdev, sta->cmn.mac_addr, key_type, -1, NULL, GFP_ATOMIC);
|
||||
#endif
|
||||
|
||||
_rtw_memset(&ev, 0x00, sizeof(ev));
|
||||
if (bgroup)
|
||||
ev.flags |= IW_MICFAILURE_GROUP;
|
||||
else
|
||||
ev.flags |= IW_MICFAILURE_PAIRWISE;
|
||||
|
||||
ev.src_addr.sa_family = ARPHRD_ETHER;
|
||||
_rtw_memcpy(ev.src_addr.sa_data, sta->cmn.mac_addr, ETH_ALEN);
|
||||
|
||||
_rtw_memset(&wrqu, 0x00, sizeof(wrqu));
|
||||
wrqu.data.length = sizeof(ev);
|
||||
|
||||
#ifndef CONFIG_IOCTL_CFG80211
|
||||
wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOSTAPD_MLME
|
||||
void rtw_hostapd_mlme_rx(_adapter *padapter, union recv_frame *precv_frame)
|
||||
{
|
||||
_pkt *skb;
|
||||
struct hostapd_priv *phostapdpriv = padapter->phostapdpriv;
|
||||
struct net_device *pmgnt_netdev = phostapdpriv->pmgnt_netdev;
|
||||
|
||||
|
||||
skb = precv_frame->u.hdr.pkt;
|
||||
|
||||
if (skb == NULL)
|
||||
return;
|
||||
|
||||
skb->data = precv_frame->u.hdr.rx_data;
|
||||
skb->tail = precv_frame->u.hdr.rx_tail;
|
||||
skb->len = precv_frame->u.hdr.len;
|
||||
|
||||
/* pskb_copy = rtw_skb_copy(skb);
|
||||
if(skb == NULL) goto _exit; */
|
||||
|
||||
skb->dev = pmgnt_netdev;
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
skb->pkt_type = PACKET_OTHERHOST;
|
||||
/* skb->protocol = __constant_htons(0x0019); ETH_P_80211_RAW */
|
||||
skb->protocol = htons(0x0003); /*ETH_P_80211_RAW*/
|
||||
|
||||
/* RTW_INFO("(1)data=0x%x, head=0x%x, tail=0x%x, mac_header=0x%x, len=%d\n", skb->data, skb->head, skb->tail, skb->mac_header, skb->len); */
|
||||
|
||||
/* skb->mac.raw = skb->data; */
|
||||
skb_reset_mac_header(skb);
|
||||
|
||||
/* skb_pull(skb, 24); */
|
||||
_rtw_memset(skb->cb, 0, sizeof(skb->cb));
|
||||
|
||||
rtw_netif_rx(pmgnt_netdev, skb);
|
||||
|
||||
precv_frame->u.hdr.pkt = NULL; /* set pointer to NULL before rtw_free_recvframe() since rtw_netif_rx() has taken the skb */
|
||||
}
|
||||
#endif /* CONFIG_HOSTAPD_MLME */
|
||||
|
||||
int rtw_recv_monitor(_adapter *padapter, union recv_frame *precv_frame)
|
||||
{
|
||||
int ret = _FAIL;
|
||||
struct recv_priv *precvpriv;
|
||||
_queue *pfree_recv_queue;
|
||||
_pkt *skb;
|
||||
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
|
||||
struct rx_pkt_attrib *pattrib;
|
||||
|
||||
if (NULL == precv_frame)
|
||||
goto _recv_drop;
|
||||
|
||||
pattrib = &precv_frame->u.hdr.attrib;
|
||||
precvpriv = &(padapter->recvpriv);
|
||||
pfree_recv_queue = &(precvpriv->free_recv_queue);
|
||||
|
||||
skb = precv_frame->u.hdr.pkt;
|
||||
if (skb == NULL) {
|
||||
RTW_INFO("%s :skb==NULL something wrong!!!!\n", __func__);
|
||||
goto _recv_drop;
|
||||
}
|
||||
|
||||
skb->data = precv_frame->u.hdr.rx_data;
|
||||
skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
|
||||
skb->len = precv_frame->u.hdr.len;
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
skb->pkt_type = PACKET_OTHERHOST;
|
||||
skb->protocol = htons(0x0019); /* ETH_P_80211_RAW */
|
||||
|
||||
rtw_netif_rx(padapter->pnetdev, skb);
|
||||
|
||||
/* set pkt pointer to NULL before rtw_free_recvframe(); the skb now belongs to the network stack */
|
||||
precv_frame->u.hdr.pkt = NULL;
|
||||
|
||||
ret = _SUCCESS;
|
||||
|
||||
_recv_drop:
|
||||
|
||||
/* enqueue back to free_recv_queue */
|
||||
if (precv_frame)
|
||||
rtw_free_recvframe(precv_frame, pfree_recv_queue);
|
||||
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
inline void rtw_rframe_set_os_pkt(union recv_frame *rframe)
|
||||
{
|
||||
_pkt *skb = rframe->u.hdr.pkt;
|
||||
|
||||
skb->data = rframe->u.hdr.rx_data;
|
||||
skb_set_tail_pointer(skb, rframe->u.hdr.len);
|
||||
skb->len = rframe->u.hdr.len;
|
||||
}
|
||||
|
||||
int rtw_recv_indicatepkt(_adapter *padapter, union recv_frame *precv_frame)
|
||||
{
|
||||
struct recv_priv *precvpriv;
|
||||
_queue *pfree_recv_queue;
|
||||
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
|
||||
|
||||
precvpriv = &(padapter->recvpriv);
|
||||
pfree_recv_queue = &(precvpriv->free_recv_queue);
|
||||
|
||||
if (precv_frame->u.hdr.pkt == NULL)
|
||||
goto _recv_indicatepkt_drop;
|
||||
|
||||
rtw_os_recv_indicate_pkt(padapter, precv_frame->u.hdr.pkt, precv_frame);
|
||||
|
||||
_recv_indicatepkt_end:
|
||||
precv_frame->u.hdr.pkt = NULL;
|
||||
rtw_free_recvframe(precv_frame, pfree_recv_queue);
|
||||
return _SUCCESS;
|
||||
|
||||
_recv_indicatepkt_drop:
|
||||
rtw_free_recvframe(precv_frame, pfree_recv_queue);
|
||||
DBG_COUNTER(padapter->rx_logs.os_indicate_err);
|
||||
return _FAIL;
|
||||
}
|
||||
|
||||
void rtw_os_read_port(_adapter *padapter, struct recv_buf *precvbuf)
|
||||
{
|
||||
struct recv_priv *precvpriv = &padapter->recvpriv;
|
||||
|
||||
#ifdef CONFIG_USB_HCI
|
||||
|
||||
precvbuf->ref_cnt--;
|
||||
|
||||
/* free skb in recv_buf */
|
||||
rtw_skb_free(precvbuf->pskb);
|
||||
|
||||
precvbuf->pskb = NULL;
|
||||
|
||||
if (precvbuf->irp_pending == _FALSE)
|
||||
rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
|
||||
|
||||
|
||||
#endif
|
||||
#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
|
||||
precvbuf->pskb = NULL;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
844
os_dep/linux/rhashtable.c
Normal file
@@ -0,0 +1,844 @@
|
||||
/*
|
||||
* Resizable, Scalable, Concurrent Hash Table
|
||||
*
|
||||
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
|
||||
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
|
||||
*
|
||||
* Code partially derived from nft_hash
|
||||
* Rewritten with rehash code from br_multicast plus single list
|
||||
* pointer as suggested by Josh Triplett
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#define HASH_DEFAULT_SIZE 64UL
|
||||
#define HASH_MIN_SIZE 4U
|
||||
#define BUCKET_LOCKS_PER_CPU 128UL
|
||||
|
||||
static u32 head_hashfn(struct rhashtable *ht,
|
||||
const struct bucket_table *tbl,
|
||||
const struct rhash_head *he)
|
||||
{
|
||||
return rht_head_hashfn(ht, tbl, he, ht->p);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROVE_LOCKING
|
||||
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
|
||||
|
||||
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
|
||||
{
|
||||
return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
|
||||
}
|
||||
|
||||
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
|
||||
{
|
||||
spinlock_t *lock = rht_bucket_lock(tbl, hash);
|
||||
|
||||
return (debug_locks) ? lockdep_is_held(lock) : 1;
|
||||
}
|
||||
#else
|
||||
#define ASSERT_RHT_MUTEX(HT)
|
||||
#endif
|
||||
|
||||
|
||||
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
|
||||
gfp_t gfp)
|
||||
{
|
||||
unsigned int i, size;
|
||||
#if defined(CONFIG_PROVE_LOCKING)
|
||||
unsigned int nr_pcpus = 2;
|
||||
#else
|
||||
unsigned int nr_pcpus = num_possible_cpus();
|
||||
#endif
|
||||
|
||||
nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
|
||||
size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
|
||||
|
||||
/* Never allocate more than 0.5 locks per bucket */
|
||||
size = min_t(unsigned int, size, tbl->size >> 1);
|
||||
|
||||
if (sizeof(spinlock_t) != 0) {
|
||||
#ifdef CONFIG_NUMA
|
||||
if (size * sizeof(spinlock_t) > PAGE_SIZE &&
|
||||
gfp == GFP_KERNEL)
|
||||
tbl->locks = vmalloc(size * sizeof(spinlock_t));
|
||||
else
|
||||
#endif
|
||||
tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
|
||||
gfp);
|
||||
if (!tbl->locks)
|
||||
return -ENOMEM;
|
||||
for (i = 0; i < size; i++)
|
||||
spin_lock_init(&tbl->locks[i]);
|
||||
}
|
||||
tbl->locks_mask = size - 1;
|
||||
|
||||
return 0;
|
||||
}
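/*
* Worked example (added for clarity): on a 4-CPU system with the default
* locks_mul of 128, size starts as roundup_pow_of_two(4 * 128) = 512 and
* is then capped at tbl->size / 2, so a 64-bucket table ends up with 32
* bucket locks and locks_mask = 31.
*/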
|
||||
|
||||
static void bucket_table_free(const struct bucket_table *tbl)
|
||||
{
|
||||
if (tbl)
|
||||
kvfree(tbl->locks);
|
||||
|
||||
kvfree(tbl);
|
||||
}
|
||||
|
||||
static void bucket_table_free_rcu(struct rcu_head *head)
|
||||
{
|
||||
bucket_table_free(container_of(head, struct bucket_table, rcu));
|
||||
}
|
||||
|
||||
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
|
||||
size_t nbuckets,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct bucket_table *tbl = NULL;
|
||||
size_t size;
|
||||
int i;
|
||||
|
||||
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
|
||||
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
|
||||
gfp != GFP_KERNEL)
|
||||
tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
|
||||
if (tbl == NULL && gfp == GFP_KERNEL)
|
||||
tbl = vzalloc(size);
|
||||
if (tbl == NULL)
|
||||
return NULL;
|
||||
|
||||
tbl->size = nbuckets;
|
||||
|
||||
if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
|
||||
bucket_table_free(tbl);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&tbl->walkers);
|
||||
|
||||
get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
|
||||
|
||||
for (i = 0; i < nbuckets; i++)
|
||||
INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
|
||||
|
||||
return tbl;
|
||||
}
|
||||
|
||||
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
|
||||
struct bucket_table *tbl)
|
||||
{
|
||||
struct bucket_table *new_tbl;
|
||||
|
||||
do {
|
||||
new_tbl = tbl;
|
||||
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
|
||||
} while (tbl);
|
||||
|
||||
return new_tbl;
|
||||
}
|
||||
|
||||
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
|
||||
{
|
||||
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
|
||||
struct bucket_table *new_tbl = rhashtable_last_table(ht,
|
||||
rht_dereference_rcu(old_tbl->future_tbl, ht));
|
||||
struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
|
||||
int err = -ENOENT;
|
||||
struct rhash_head *head, *next, *entry;
|
||||
spinlock_t *new_bucket_lock;
|
||||
unsigned int new_hash;
|
||||
|
||||
rht_for_each(entry, old_tbl, old_hash) {
|
||||
err = 0;
|
||||
next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
|
||||
|
||||
if (rht_is_a_nulls(next))
|
||||
break;
|
||||
|
||||
pprev = &entry->next;
|
||||
}
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
new_hash = head_hashfn(ht, new_tbl, entry);
|
||||
|
||||
new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
|
||||
|
||||
spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
|
||||
head = rht_dereference_bucket(new_tbl->buckets[new_hash],
|
||||
new_tbl, new_hash);
|
||||
|
||||
RCU_INIT_POINTER(entry->next, head);
|
||||
|
||||
rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
|
||||
spin_unlock(new_bucket_lock);
|
||||
|
||||
rcu_assign_pointer(*pprev, next);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void rhashtable_rehash_chain(struct rhashtable *ht,
|
||||
unsigned int old_hash)
|
||||
{
|
||||
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
|
||||
spinlock_t *old_bucket_lock;
|
||||
|
||||
old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
|
||||
|
||||
spin_lock_bh(old_bucket_lock);
|
||||
while (!rhashtable_rehash_one(ht, old_hash))
|
||||
;
|
||||
old_tbl->rehash++;
|
||||
spin_unlock_bh(old_bucket_lock);
|
||||
}
|
||||
|
||||
static int rhashtable_rehash_attach(struct rhashtable *ht,
|
||||
struct bucket_table *old_tbl,
|
||||
struct bucket_table *new_tbl)
|
||||
{
|
||||
/* Protect future_tbl using the first bucket lock. */
|
||||
spin_lock_bh(old_tbl->locks);
|
||||
|
||||
/* Did somebody beat us to it? */
|
||||
if (rcu_access_pointer(old_tbl->future_tbl)) {
|
||||
spin_unlock_bh(old_tbl->locks);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
/* Make insertions go into the new, empty table right away. Deletions
|
||||
* and lookups will be attempted in both tables until we synchronize.
|
||||
*/
|
||||
rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
|
||||
|
||||
/* Ensure the new table is visible to readers. */
|
||||
smp_wmb();
|
||||
|
||||
spin_unlock_bh(old_tbl->locks);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rhashtable_rehash_table(struct rhashtable *ht)
|
||||
{
|
||||
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
|
||||
struct bucket_table *new_tbl;
|
||||
struct rhashtable_walker *walker;
|
||||
unsigned int old_hash;
|
||||
|
||||
new_tbl = rht_dereference(old_tbl->future_tbl, ht);
|
||||
if (!new_tbl)
|
||||
return 0;
|
||||
|
||||
for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
|
||||
rhashtable_rehash_chain(ht, old_hash);
|
||||
|
||||
/* Publish the new table pointer. */
|
||||
rcu_assign_pointer(ht->tbl, new_tbl);
|
||||
|
||||
spin_lock(&ht->lock);
|
||||
list_for_each_entry(walker, &old_tbl->walkers, list)
|
||||
walker->tbl = NULL;
|
||||
spin_unlock(&ht->lock);
|
||||
|
||||
/* Wait for readers. All new readers will see the new
|
||||
* table, and thus no references to the old table will
|
||||
* remain.
|
||||
*/
|
||||
call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
|
||||
|
||||
return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_expand - Expand hash table while allowing concurrent lookups
|
||||
* @ht: the hash table to expand
|
||||
*
|
||||
* A secondary bucket array is allocated and the hash entries are migrated.
|
||||
*
|
||||
* This function may only be called in a context where it is safe to call
|
||||
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
|
||||
*
|
||||
* The caller must ensure that no concurrent resizing occurs by holding
|
||||
* ht->mutex.
|
||||
*
|
||||
* It is valid to have concurrent insertions and deletions protected by per
|
||||
* bucket locks or concurrent RCU protected lookups and traversals.
|
||||
*/
|
||||
static int rhashtable_expand(struct rhashtable *ht)
|
||||
{
|
||||
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
|
||||
int err;
|
||||
|
||||
ASSERT_RHT_MUTEX(ht);
|
||||
|
||||
old_tbl = rhashtable_last_table(ht, old_tbl);
|
||||
|
||||
new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
|
||||
if (new_tbl == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
|
||||
if (err)
|
||||
bucket_table_free(new_tbl);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
|
||||
* @ht: the hash table to shrink
|
||||
*
|
||||
* This function shrinks the hash table to its minimum size, i.e. the smallest
|
||||
* size that would not cause it to expand again right away.
|
||||
*
|
||||
* The caller must ensure that no concurrent resizing occurs by holding
|
||||
* ht->mutex.
|
||||
*
|
||||
* The caller must ensure that no concurrent table mutations take place.
|
||||
* It is however valid to have concurrent lookups if they are RCU protected.
|
||||
*
|
||||
* It is valid to have concurrent insertions and deletions protected by per
|
||||
* bucket locks or concurrent RCU protected lookups and traversals.
|
||||
*/
|
||||
static int rhashtable_shrink(struct rhashtable *ht)
|
||||
{
|
||||
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
|
||||
unsigned int size;
|
||||
int err;
|
||||
|
||||
ASSERT_RHT_MUTEX(ht);
|
||||
|
||||
size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
|
||||
if (size < ht->p.min_size)
|
||||
size = ht->p.min_size;
|
||||
|
||||
if (old_tbl->size <= size)
|
||||
return 0;
|
||||
|
||||
if (rht_dereference(old_tbl->future_tbl, ht))
|
||||
return -EEXIST;
|
||||
|
||||
new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
|
||||
if (new_tbl == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
|
||||
if (err)
|
||||
bucket_table_free(new_tbl);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void rht_deferred_worker(struct work_struct *work)
|
||||
{
|
||||
struct rhashtable *ht;
|
||||
struct bucket_table *tbl;
|
||||
int err = 0;
|
||||
|
||||
ht = container_of(work, struct rhashtable, run_work);
|
||||
mutex_lock(&ht->mutex);
|
||||
|
||||
tbl = rht_dereference(ht->tbl, ht);
|
||||
tbl = rhashtable_last_table(ht, tbl);
|
||||
|
||||
if (rht_grow_above_75(ht, tbl))
|
||||
rhashtable_expand(ht);
|
||||
else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
|
||||
rhashtable_shrink(ht);
|
||||
|
||||
err = rhashtable_rehash_table(ht);
|
||||
|
||||
mutex_unlock(&ht->mutex);
|
||||
|
||||
if (err)
|
||||
schedule_work(&ht->run_work);
|
||||
}
|
||||
|
||||
static bool rhashtable_check_elasticity(struct rhashtable *ht,
|
||||
struct bucket_table *tbl,
|
||||
unsigned int hash)
|
||||
{
|
||||
unsigned int elasticity = ht->elasticity;
|
||||
struct rhash_head *head;
|
||||
|
||||
rht_for_each(head, tbl, hash)
|
||||
if (!--elasticity)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int rhashtable_insert_rehash(struct rhashtable *ht,
|
||||
struct bucket_table *tbl)
|
||||
{
|
||||
struct bucket_table *old_tbl;
|
||||
struct bucket_table *new_tbl;
|
||||
unsigned int size;
|
||||
int err;
|
||||
|
||||
old_tbl = rht_dereference_rcu(ht->tbl, ht);
|
||||
|
||||
size = tbl->size;
|
||||
|
||||
err = -EBUSY;
|
||||
|
||||
if (rht_grow_above_75(ht, tbl))
|
||||
size *= 2;
|
||||
/* Do not schedule more than one rehash */
|
||||
else if (old_tbl != tbl)
|
||||
goto fail;
|
||||
|
||||
err = -ENOMEM;
|
||||
|
||||
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
|
||||
if (new_tbl == NULL)
|
||||
goto fail;
|
||||
|
||||
err = rhashtable_rehash_attach(ht, tbl, new_tbl);
|
||||
if (err) {
|
||||
bucket_table_free(new_tbl);
|
||||
if (err == -EEXIST)
|
||||
err = 0;
|
||||
} else
|
||||
schedule_work(&ht->run_work);
|
||||
|
||||
return err;
|
||||
|
||||
fail:
|
||||
/* Do not fail the insert if someone else did a rehash. */
|
||||
if (likely(rcu_dereference_raw(tbl->future_tbl)))
|
||||
return 0;
|
||||
|
||||
/* Schedule async rehash to retry allocation in process context. */
|
||||
if (err == -ENOMEM)
|
||||
schedule_work(&ht->run_work);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
|
||||
const void *key,
|
||||
struct rhash_head *obj,
|
||||
struct bucket_table *tbl)
|
||||
{
|
||||
struct rhash_head *head;
|
||||
unsigned int hash;
|
||||
int err;
|
||||
|
||||
tbl = rhashtable_last_table(ht, tbl);
|
||||
hash = head_hashfn(ht, tbl, obj);
|
||||
spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
|
||||
|
||||
err = -EEXIST;
|
||||
if (key && rhashtable_lookup_fast(ht, key, ht->p))
|
||||
goto exit;
|
||||
|
||||
err = -E2BIG;
|
||||
if (unlikely(rht_grow_above_max(ht, tbl)))
|
||||
goto exit;
|
||||
|
||||
err = -EAGAIN;
|
||||
if (rhashtable_check_elasticity(ht, tbl, hash) ||
|
||||
rht_grow_above_100(ht, tbl))
|
||||
goto exit;
|
||||
|
||||
err = 0;
|
||||
|
||||
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
|
||||
|
||||
RCU_INIT_POINTER(obj->next, head);
|
||||
|
||||
rcu_assign_pointer(tbl->buckets[hash], obj);
|
||||
|
||||
atomic_inc(&ht->nelems);
|
||||
|
||||
exit:
|
||||
spin_unlock(rht_bucket_lock(tbl, hash));
|
||||
|
||||
if (err == 0)
|
||||
return NULL;
|
||||
else if (err == -EAGAIN)
|
||||
return tbl;
|
||||
else
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_walk_init - Initialise an iterator
|
||||
* @ht: Table to walk over
|
||||
* @iter: Hash table Iterator
|
||||
*
|
||||
* This function prepares a hash table walk.
|
||||
*
|
||||
* Note that if you restart a walk after rhashtable_walk_stop you
|
||||
* may see the same object twice. Also, you may miss objects if
|
||||
* there are removals in between rhashtable_walk_stop and the next
|
||||
* call to rhashtable_walk_start.
|
||||
*
|
||||
* For a completely stable walk you should construct your own data
|
||||
* structure outside the hash table.
|
||||
*
|
||||
* This function may sleep so you must not call it from interrupt
|
||||
* context or with spin locks held.
|
||||
*
|
||||
* You must call rhashtable_walk_exit if this function returns
|
||||
* successfully.
|
||||
*/
|
||||
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
|
||||
{
|
||||
iter->ht = ht;
|
||||
iter->p = NULL;
|
||||
iter->slot = 0;
|
||||
iter->skip = 0;
|
||||
|
||||
iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
|
||||
if (!iter->walker)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&ht->lock);
|
||||
iter->walker->tbl =
|
||||
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
|
||||
list_add(&iter->walker->list, &iter->walker->tbl->walkers);
|
||||
spin_unlock(&ht->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_walk_exit - Free an iterator
|
||||
* @iter: Hash table Iterator
|
||||
*
|
||||
* This function frees resources allocated by rhashtable_walk_init.
|
||||
*/
|
||||
void rhashtable_walk_exit(struct rhashtable_iter *iter)
|
||||
{
|
||||
spin_lock(&iter->ht->lock);
|
||||
if (iter->walker->tbl)
|
||||
list_del(&iter->walker->list);
|
||||
spin_unlock(&iter->ht->lock);
|
||||
kfree(iter->walker);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_walk_start - Start a hash table walk
|
||||
* @iter: Hash table iterator
|
||||
*
|
||||
* Start a hash table walk. Note that we take the RCU lock in all
|
||||
* cases including when we return an error. So you must always call
|
||||
* rhashtable_walk_stop to clean up.
|
||||
*
|
||||
* Returns zero if successful.
|
||||
*
|
||||
* Returns -EAGAIN if a resize event occurred. Note that the iterator
|
||||
* will rewind back to the beginning and you may use it immediately
|
||||
* by calling rhashtable_walk_next.
|
||||
*/
|
||||
int rhashtable_walk_start(struct rhashtable_iter *iter)
|
||||
__acquires(RCU)
|
||||
{
|
||||
struct rhashtable *ht = iter->ht;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
spin_lock(&ht->lock);
|
||||
if (iter->walker->tbl)
|
||||
list_del(&iter->walker->list);
|
||||
spin_unlock(&ht->lock);
|
||||
|
||||
if (!iter->walker->tbl) {
|
||||
iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_walk_next - Return the next object and advance the iterator
|
||||
* @iter: Hash table iterator
|
||||
*
|
||||
* Note that you must call rhashtable_walk_stop when you are finished
|
||||
* with the walk.
|
||||
*
|
||||
* Returns the next object or NULL when the end of the table is reached.
|
||||
*
|
||||
* Returns -EAGAIN if a resize event occurred. Note that the iterator
|
||||
* will rewind back to the beginning and you may continue to use it.
|
||||
*/
|
||||
void *rhashtable_walk_next(struct rhashtable_iter *iter)
|
||||
{
|
||||
struct bucket_table *tbl = iter->walker->tbl;
|
||||
struct rhashtable *ht = iter->ht;
|
||||
struct rhash_head *p = iter->p;
|
||||
|
||||
if (p) {
|
||||
p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
|
||||
goto next;
|
||||
}
|
||||
|
||||
for (; iter->slot < tbl->size; iter->slot++) {
|
||||
int skip = iter->skip;
|
||||
|
||||
rht_for_each_rcu(p, tbl, iter->slot) {
|
||||
if (!skip)
|
||||
break;
|
||||
skip--;
|
||||
}
|
||||
|
||||
next:
|
||||
if (!rht_is_a_nulls(p)) {
|
||||
iter->skip++;
|
||||
iter->p = p;
|
||||
return rht_obj(ht, p);
|
||||
}
|
||||
|
||||
iter->skip = 0;
|
||||
}
|
||||
|
||||
iter->p = NULL;
|
||||
|
||||
/* Ensure we see any new tables. */
|
||||
smp_rmb();
|
||||
|
||||
iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
|
||||
if (iter->walker->tbl) {
|
||||
iter->slot = 0;
|
||||
iter->skip = 0;
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_walk_stop - Finish a hash table walk
|
||||
* @iter: Hash table iterator
|
||||
*
|
||||
* Finish a hash table walk.
|
||||
*/
|
||||
void rhashtable_walk_stop(struct rhashtable_iter *iter)
|
||||
__releases(RCU)
|
||||
{
|
||||
struct rhashtable *ht;
|
||||
struct bucket_table *tbl = iter->walker->tbl;
|
||||
|
||||
if (!tbl)
|
||||
goto out;
|
||||
|
||||
ht = iter->ht;
|
||||
|
||||
spin_lock(&ht->lock);
|
||||
if (tbl->rehash < tbl->size)
|
||||
list_add(&iter->walker->list, &tbl->walkers);
|
||||
else
|
||||
iter->walker->tbl = NULL;
|
||||
spin_unlock(&ht->lock);
|
||||
|
||||
iter->p = NULL;
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
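/*
* Illustrative walker usage sketch (added for clarity, not part of the
* original source); "ht" is an already initialised table, "do_something"
* is a placeholder for the caller's per-object work, and error handling
* is trimmed:
*
*	struct rhashtable_iter iter;
*	void *obj;
*
*	if (rhashtable_walk_init(ht, &iter))
*		return;
*	rhashtable_walk_start(&iter);
*	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
*		if (IS_ERR(obj))
*			continue;
*		do_something(obj);
*	}
*	rhashtable_walk_stop(&iter);
*	rhashtable_walk_exit(&iter);
*
* An IS_ERR() return from rhashtable_walk_next() means -EAGAIN: the table
* was resized and the iterator rewound, so the walk simply continues. The
* same applies to a -EAGAIN return from rhashtable_walk_start().
*/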
|
||||
|
||||
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
|
||||
{
|
||||
return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
|
||||
(unsigned long)params->min_size);
|
||||
}
|
||||
|
||||
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
|
||||
{
|
||||
return jhash2(key, length, seed);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_init - initialize a new hash table
|
||||
* @ht: hash table to be initialized
|
||||
* @params: configuration parameters
|
||||
*
|
||||
* Initializes a new hash table based on the provided configuration
|
||||
* parameters. A table can be configured either with a variable or
|
||||
* fixed length key:
|
||||
*
|
||||
* Configuration Example 1: Fixed length keys
|
||||
* struct test_obj {
|
||||
* int key;
|
||||
* void * my_member;
|
||||
* struct rhash_head node;
|
||||
* };
|
||||
*
|
||||
* struct rhashtable_params params = {
|
||||
* .head_offset = offsetof(struct test_obj, node),
|
||||
* .key_offset = offsetof(struct test_obj, key),
|
||||
* .key_len = sizeof(int),
|
||||
* .hashfn = jhash,
|
||||
* .nulls_base = (1U << RHT_BASE_SHIFT),
|
||||
* };
|
||||
*
|
||||
* Configuration Example 2: Variable length keys
|
||||
* struct test_obj {
|
||||
* [...]
|
||||
* struct rhash_head node;
|
||||
* };
|
||||
*
|
||||
* u32 my_hash_fn(const void *data, u32 len, u32 seed)
|
||||
* {
|
||||
* struct test_obj *obj = data;
|
||||
*
|
||||
* return [... hash ...];
|
||||
* }
|
||||
*
|
||||
* struct rhashtable_params params = {
|
||||
* .head_offset = offsetof(struct test_obj, node),
|
||||
* .hashfn = jhash,
|
||||
* .obj_hashfn = my_hash_fn,
|
||||
* };
|
||||
*/
|
||||
int rhashtable_init(struct rhashtable *ht,
|
||||
const struct rhashtable_params *params)
|
||||
{
|
||||
struct bucket_table *tbl;
|
||||
size_t size;
|
||||
|
||||
size = HASH_DEFAULT_SIZE;
|
||||
|
||||
if ((!params->key_len && !params->obj_hashfn) ||
|
||||
(params->obj_hashfn && !params->obj_cmpfn))
|
||||
return -EINVAL;
|
||||
|
||||
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
|
||||
return -EINVAL;
|
||||
|
||||
memset(ht, 0, sizeof(*ht));
|
||||
mutex_init(&ht->mutex);
|
||||
spin_lock_init(&ht->lock);
|
||||
memcpy(&ht->p, params, sizeof(*params));
|
||||
|
||||
if (params->min_size)
|
||||
ht->p.min_size = roundup_pow_of_two(params->min_size);
|
||||
|
||||
if (params->max_size)
|
||||
ht->p.max_size = rounddown_pow_of_two(params->max_size);
|
||||
|
||||
if (params->insecure_max_entries)
|
||||
ht->p.insecure_max_entries =
|
||||
rounddown_pow_of_two(params->insecure_max_entries);
|
||||
else
|
||||
ht->p.insecure_max_entries = ht->p.max_size * 2;
|
||||
|
||||
ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
|
||||
|
||||
if (params->nelem_hint)
|
||||
size = rounded_hashtable_size(&ht->p);
|
||||
|
||||
/* The maximum (not average) chain length grows with the
|
||||
* size of the hash table, at a rate of (log N)/(log log N).
|
||||
* The value of 16 is selected so that even if the hash
|
||||
* table grew to 2^32 you would not expect the maximum
|
||||
* chain length to exceed it unless we are under attack
|
||||
* (or extremely unlucky).
|
||||
*
|
||||
* As this limit is only to detect attacks, we don't need
|
||||
* to set it to a lower value as you'd need the chain
|
||||
* length to vastly exceed 16 to have any real effect
|
||||
* on the system.
|
||||
*/
|
||||
if (!params->insecure_elasticity)
|
||||
ht->elasticity = 16;
|
||||
|
||||
if (params->locks_mul)
|
||||
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
|
||||
else
|
||||
ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
|
||||
|
||||
ht->key_len = ht->p.key_len;
|
||||
if (!params->hashfn) {
|
||||
ht->p.hashfn = jhash;
|
||||
|
||||
if (!(ht->key_len & (sizeof(u32) - 1))) {
|
||||
ht->key_len /= sizeof(u32);
|
||||
ht->p.hashfn = rhashtable_jhash2;
|
||||
}
|
||||
}
|
||||
|
||||
tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
|
||||
if (tbl == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
atomic_set(&ht->nelems, 0);
|
||||
|
||||
RCU_INIT_POINTER(ht->tbl, tbl);
|
||||
|
||||
INIT_WORK(&ht->run_work, rht_deferred_worker);
|
||||
|
||||
return 0;
|
||||
}
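/*
* Illustrative end-to-end sketch (added for clarity, not part of the
* original source), based on the fixed length key example in the comment
* above; "my_ht", "my_params" and "obj" are placeholder names, and error
* handling plus element freeing are trimmed:
*
*	static const struct rhashtable_params my_params = {
*		.head_offset = offsetof(struct test_obj, node),
*		.key_offset  = offsetof(struct test_obj, key),
*		.key_len     = sizeof(int),
*		.hashfn      = jhash,
*	};
*
*	struct rhashtable my_ht;
*	struct test_obj *obj, *found;
*
*	rhashtable_init(&my_ht, &my_params);
*	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
*	obj->key = 42;
*	rhashtable_insert_fast(&my_ht, &obj->node, my_params);
*	found = rhashtable_lookup_fast(&my_ht, &obj->key, my_params);
*	rhashtable_free_and_destroy(&my_ht, NULL, NULL);
*/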
|
||||
|
||||
/**
|
||||
* rhashtable_free_and_destroy - free elements and destroy hash table
|
||||
* @ht: the hash table to destroy
|
||||
* @free_fn: callback to release resources of element
|
||||
* @arg: pointer passed to free_fn
|
||||
*
|
||||
* Stops any pending async resize. If defined, invokes free_fn for each
|
||||
* element to release its resources. Please note that RCU protected
|
||||
* readers may still be accessing the elements. Releasing of resources
|
||||
* must occur in a compatible manner. Then frees the bucket array.
|
||||
*
|
||||
* This function will eventually sleep to wait for an async resize
|
||||
* to complete. The caller is responsible for ensuring that no further write
|
||||
* operations occur in parallel.
|
||||
*/
|
||||
void rhashtable_free_and_destroy(struct rhashtable *ht,
|
||||
void (*free_fn)(void *ptr, void *arg),
|
||||
void *arg)
|
||||
{
|
||||
const struct bucket_table *tbl;
|
||||
unsigned int i;
|
||||
|
||||
cancel_work_sync(&ht->run_work);
|
||||
|
||||
mutex_lock(&ht->mutex);
|
||||
tbl = rht_dereference(ht->tbl, ht);
|
||||
if (free_fn) {
|
||||
for (i = 0; i < tbl->size; i++) {
|
||||
struct rhash_head *pos, *next;
|
||||
|
||||
for (pos = rht_dereference(tbl->buckets[i], ht),
|
||||
next = !rht_is_a_nulls(pos) ?
|
||||
rht_dereference(pos->next, ht) : NULL;
|
||||
!rht_is_a_nulls(pos);
|
||||
pos = next,
|
||||
next = !rht_is_a_nulls(pos) ?
|
||||
rht_dereference(pos->next, ht) : NULL)
|
||||
free_fn(rht_obj(ht, pos), arg);
|
||||
}
|
||||
}
|
||||
|
||||
bucket_table_free(tbl);
|
||||
mutex_unlock(&ht->mutex);
|
||||
}
|
||||
|
||||
void rhashtable_destroy(struct rhashtable *ht)
|
||||
{
|
||||
return rhashtable_free_and_destroy(ht, NULL, NULL);
|
||||
}
|
||||
|
||||
827
os_dep/linux/rhashtable.h
Normal file
@@ -0,0 +1,827 @@
|
||||
/*
|
||||
* Resizable, Scalable, Concurrent Hash Table
|
||||
*
|
||||
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
|
||||
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
|
||||
*
|
||||
* Code partially derived from nft_hash
|
||||
* Rewritten with rehash code from br_multicast plus single list
|
||||
* pointer as suggested by Josh Triplett
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_RHASHTABLE_H
|
||||
#define _LINUX_RHASHTABLE_H
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/list_nulls.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
/*
|
||||
* The end of the chain is marked with a special nulls marks which has
|
||||
* the following format:
|
||||
*
|
||||
* +-------+-----------------------------------------------------+-+
|
||||
* | Base | Hash |1|
|
||||
* +-------+-----------------------------------------------------+-+
|
||||
*
|
||||
* Base (4 bits) : Reserved to distinguish between multiple tables.
|
||||
* Specified via &struct rhashtable_params.nulls_base.
|
||||
* Hash (27 bits): Full hash (unmasked) of first element added to bucket
|
||||
* 1 (1 bit) : Nulls marker (always set)
|
||||
*
|
||||
* The remaining bits of the next pointer remain unused for now.
|
||||
*/
|
||||
#define RHT_BASE_BITS 4
|
||||
#define RHT_HASH_BITS 27
|
||||
#define RHT_BASE_SHIFT RHT_HASH_BITS
|
||||
|
||||
/* Base bits plus 1 bit for nulls marker */
|
||||
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
|
||||
|
||||
struct rhash_head {
|
||||
struct rhash_head __rcu *next;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct bucket_table - Table of hash buckets
|
||||
* @size: Number of hash buckets
|
||||
* @rehash: Current bucket being rehashed
|
||||
* @hash_rnd: Random seed to fold into hash
|
||||
* @locks_mask: Mask to apply before accessing locks[]
|
||||
* @locks: Array of spinlocks protecting individual buckets
|
||||
* @walkers: List of active walkers
|
||||
* @rcu: RCU structure for freeing the table
|
||||
* @future_tbl: Table under construction during rehashing
|
||||
* @buckets: size * hash buckets
|
||||
*/
|
||||
struct bucket_table {
|
||||
unsigned int size;
|
||||
unsigned int rehash;
|
||||
u32 hash_rnd;
|
||||
unsigned int locks_mask;
|
||||
spinlock_t *locks;
|
||||
struct list_head walkers;
|
||||
struct rcu_head rcu;
|
||||
|
||||
struct bucket_table __rcu *future_tbl;
|
||||
|
||||
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct rhashtable_compare_arg - Key for the function rhashtable_compare
|
||||
* @ht: Hash table
|
||||
* @key: Key to compare against
|
||||
*/
|
||||
struct rhashtable_compare_arg {
|
||||
struct rhashtable *ht;
|
||||
const void *key;
|
||||
};
|
||||
|
||||
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
|
||||
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
|
||||
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
|
||||
const void *obj);
|
||||
|
||||
struct rhashtable;
|
||||
|
||||
/**
|
||||
* struct rhashtable_params - Hash table construction parameters
|
||||
* @nelem_hint: Hint on number of elements, should be 75% of desired size
|
||||
* @key_len: Length of key
|
||||
* @key_offset: Offset of key in struct to be hashed
|
||||
* @head_offset: Offset of rhash_head in struct to be hashed
|
||||
* @insecure_max_entries: Maximum number of entries (may be exceeded)
|
||||
* @max_size: Maximum size while expanding
|
||||
* @min_size: Minimum size while shrinking
|
||||
* @nulls_base: Base value to generate nulls marker
|
||||
* @insecure_elasticity: Set to true to disable chain length checks
|
||||
* @automatic_shrinking: Enable automatic shrinking of tables
|
||||
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
|
||||
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
|
||||
* @obj_hashfn: Function to hash object
|
||||
* @obj_cmpfn: Function to compare key with object
|
||||
*/
|
||||
struct rhashtable_params {
|
||||
size_t nelem_hint;
|
||||
size_t key_len;
|
||||
size_t key_offset;
|
||||
size_t head_offset;
|
||||
unsigned int insecure_max_entries;
|
||||
unsigned int max_size;
|
||||
unsigned int min_size;
|
||||
u32 nulls_base;
|
||||
bool insecure_elasticity;
|
||||
bool automatic_shrinking;
|
||||
size_t locks_mul;
|
||||
rht_hashfn_t hashfn;
|
||||
rht_obj_hashfn_t obj_hashfn;
|
||||
rht_obj_cmpfn_t obj_cmpfn;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct rhashtable - Hash table handle
|
||||
* @tbl: Bucket table
|
||||
* @nelems: Number of elements in table
|
||||
* @key_len: Key length for hashfn
|
||||
* @elasticity: Maximum chain length before rehash
|
||||
* @p: Configuration parameters
|
||||
* @run_work: Deferred worker to expand/shrink asynchronously
|
||||
* @mutex: Mutex to protect current/future table swapping
|
||||
* @lock: Spin lock to protect walker list
|
||||
*/
|
||||
struct rhashtable {
|
||||
struct bucket_table __rcu *tbl;
|
||||
atomic_t nelems;
|
||||
unsigned int key_len;
|
||||
unsigned int elasticity;
|
||||
struct rhashtable_params p;
|
||||
struct work_struct run_work;
|
||||
struct mutex mutex;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct rhashtable_walker - Hash table walker
|
||||
* @list: List entry on list of walkers
|
||||
* @tbl: The table that we were walking over
|
||||
*/
|
||||
struct rhashtable_walker {
|
||||
struct list_head list;
|
||||
struct bucket_table *tbl;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct rhashtable_iter - Hash table iterator, fits into netlink cb
|
||||
* @ht: Table to iterate through
|
||||
* @p: Current pointer
|
||||
* @walker: Associated rhashtable walker
|
||||
* @slot: Current slot
|
||||
* @skip: Number of entries to skip in slot
|
||||
*/
|
||||
struct rhashtable_iter {
|
||||
struct rhashtable *ht;
|
||||
struct rhash_head *p;
|
||||
struct rhashtable_walker *walker;
|
||||
unsigned int slot;
|
||||
unsigned int skip;
|
||||
};
|
||||
|
||||
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
|
||||
{
|
||||
return NULLS_MARKER(ht->p.nulls_base + hash);
|
||||
}
|
||||
|
||||
#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
|
||||
((ptr) = (typeof(ptr)) rht_marker(ht, hash))
|
||||
|
||||
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
|
||||
{
|
||||
return ((unsigned long) ptr & 1);
|
||||
}
|
||||
|
||||
static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
|
||||
{
|
||||
return ((unsigned long) ptr) >> 1;
|
||||
}
|
||||
|
||||
static inline void *rht_obj(const struct rhashtable *ht,
|
||||
const struct rhash_head *he)
|
||||
{
|
||||
return (char *)he - ht->p.head_offset;
|
||||
}
|
||||
|
||||
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
|
||||
unsigned int hash)
|
||||
{
|
||||
return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
|
||||
}
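/*
* Example (added for clarity): RHT_HASH_RESERVED_SPACE is 5, so for a
* 64-bucket table the bucket index comes from bits 5..10 of the 32-bit
* hash; the low bits are skipped to leave room for the nulls marker
* encoding described at the top of this file.
*/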
|
||||
|
||||
static inline unsigned int rht_key_hashfn(
|
||||
struct rhashtable *ht, const struct bucket_table *tbl,
|
||||
const void *key, const struct rhashtable_params params)
|
||||
{
|
||||
unsigned int hash;
|
||||
|
||||
/* params must be equal to ht->p if it isn't constant. */
|
||||
if (!__builtin_constant_p(params.key_len))
|
||||
hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
|
||||
else if (params.key_len) {
|
||||
unsigned int key_len = params.key_len;
|
||||
|
||||
if (params.hashfn)
|
||||
hash = params.hashfn(key, key_len, tbl->hash_rnd);
|
||||
else if (key_len & (sizeof(u32) - 1))
|
||||
hash = jhash(key, key_len, tbl->hash_rnd);
|
||||
else
|
||||
hash = jhash2(key, key_len / sizeof(u32),
|
||||
tbl->hash_rnd);
|
||||
} else {
|
||||
unsigned int key_len = ht->p.key_len;
|
||||
|
||||
if (params.hashfn)
|
||||
hash = params.hashfn(key, key_len, tbl->hash_rnd);
|
||||
else
|
||||
hash = jhash(key, key_len, tbl->hash_rnd);
|
||||
}
|
||||
|
||||
return rht_bucket_index(tbl, hash);
|
||||
}
|
||||
|
||||
static inline unsigned int rht_head_hashfn(
|
||||
struct rhashtable *ht, const struct bucket_table *tbl,
|
||||
const struct rhash_head *he, const struct rhashtable_params params)
|
||||
{
|
||||
const char *ptr = rht_obj(ht, he);
|
||||
|
||||
return likely(params.obj_hashfn) ?
|
||||
rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
|
||||
ht->p.key_len,
|
||||
tbl->hash_rnd)) :
|
||||
rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* rht_grow_above_75 - returns true if nelems > 0.75 * table-size
|
||||
* @ht: hash table
|
||||
* @tbl: current table
|
||||
*/
|
||||
static inline bool rht_grow_above_75(const struct rhashtable *ht,
|
||||
const struct bucket_table *tbl)
|
||||
{
|
||||
/* Expand table when exceeding 75% load */
|
||||
return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
|
||||
(!ht->p.max_size || tbl->size < ht->p.max_size);
|
||||
}
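/*
* Example (added for clarity): a 64-bucket table reports "grow" once it
* holds more than 48 elements (64 / 4 * 3), provided max_size has not
* been reached.
*/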
|
||||
|
||||
/**
|
||||
* rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
|
||||
* @ht: hash table
|
||||
* @tbl: current table
|
||||
*/
|
||||
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
|
||||
const struct bucket_table *tbl)
|
||||
{
|
||||
/* Shrink table beneath 30% load */
|
||||
return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
|
||||
tbl->size > ht->p.min_size;
|
||||
}
|
||||
|
||||
/**
|
||||
* rht_grow_above_100 - returns true if nelems > table-size
|
||||
* @ht: hash table
|
||||
* @tbl: current table
|
||||
*/
|
||||
static inline bool rht_grow_above_100(const struct rhashtable *ht,
|
||||
const struct bucket_table *tbl)
|
||||
{
|
||||
return atomic_read(&ht->nelems) > tbl->size &&
|
||||
(!ht->p.max_size || tbl->size < ht->p.max_size);
|
||||
}
|
||||
|
||||
/**
|
||||
* rht_grow_above_max - returns true if table is above maximum
|
||||
* @ht: hash table
|
||||
* @tbl: current table
|
||||
*/
|
||||
static inline bool rht_grow_above_max(const struct rhashtable *ht,
|
||||
const struct bucket_table *tbl)
|
||||
{
|
||||
return ht->p.insecure_max_entries &&
|
||||
atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
|
||||
}
|
||||
|
||||
/* The bucket lock is selected based on the hash and protects mutations
|
||||
* on a group of hash buckets.
|
||||
*
|
||||
* A maximum of tbl->size/2 bucket locks is allocated. This ensures that
|
||||
* a single lock always covers both buckets which may both contain
|
||||
* entries which link to the same bucket of the old table during resizing.
|
||||
* This simplifies the locking, as locking the bucket in both
|
||||
* tables during a resize always guarantees protection.
|
||||
*
|
||||
* IMPORTANT: When holding the bucket lock of both the old and new table
|
||||
* during expansions and shrinking, the old bucket lock must always be
|
||||
* acquired first.
|
||||
*/
|
||||
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
|
||||
unsigned int hash)
|
||||
{
|
||||
return &tbl->locks[hash & tbl->locks_mask];
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROVE_LOCKING
|
||||
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
|
||||
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
|
||||
#else
|
||||
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
|
||||
u32 hash)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
#endif /* CONFIG_PROVE_LOCKING */
|
||||
|
||||
int rhashtable_init(struct rhashtable *ht,
|
||||
const struct rhashtable_params *params);
|
||||
|
||||
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
|
||||
const void *key,
|
||||
struct rhash_head *obj,
|
||||
struct bucket_table *old_tbl);
|
||||
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
|
||||
|
||||
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
|
||||
void rhashtable_walk_exit(struct rhashtable_iter *iter);
|
||||
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
|
||||
void *rhashtable_walk_next(struct rhashtable_iter *iter);
|
||||
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
|
||||
|
||||
void rhashtable_free_and_destroy(struct rhashtable *ht,
|
||||
void (*free_fn)(void *ptr, void *arg),
|
||||
void *arg);
|
||||
void rhashtable_destroy(struct rhashtable *ht);
|
||||
|
||||
#define rht_dereference(p, ht) \
|
||||
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
|
||||
|
||||
#define rht_dereference_rcu(p, ht) \
|
||||
rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
|
||||
|
||||
#define rht_dereference_bucket(p, tbl, hash) \
|
||||
rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
|
||||
|
||||
#define rht_dereference_bucket_rcu(p, tbl, hash) \
|
||||
rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
|
||||
|
||||
#define rht_entry(tpos, pos, member) \
|
||||
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
|
||||
|
||||
/**
|
||||
* rht_for_each_continue - continue iterating over hash chain
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @head: the previous &struct rhash_head to continue from
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
*/
|
||||
#define rht_for_each_continue(pos, head, tbl, hash) \
|
||||
for (pos = rht_dereference_bucket(head, tbl, hash); \
|
||||
!rht_is_a_nulls(pos); \
|
||||
pos = rht_dereference_bucket((pos)->next, tbl, hash))
|
||||
|
||||
/**
|
||||
* rht_for_each - iterate over hash chain
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
*/
|
||||
#define rht_for_each(pos, tbl, hash) \
|
||||
rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
|
||||
|
||||
/**
|
||||
* rht_for_each_entry_continue - continue iterating over hash chain
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @head: the previous &struct rhash_head to continue from
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
* @member: name of the &struct rhash_head within the hashable struct.
|
||||
*/
|
||||
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
|
||||
for (pos = rht_dereference_bucket(head, tbl, hash); \
|
||||
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
|
||||
pos = rht_dereference_bucket((pos)->next, tbl, hash))
|
||||
|
||||
/**
|
||||
* rht_for_each_entry - iterate over hash chain of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
* @member: name of the &struct rhash_head within the hashable struct.
|
||||
*/
|
||||
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
|
||||
rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
|
||||
tbl, hash, member)
|
||||
|
||||
/**
|
||||
* rht_for_each_entry_safe - safely iterate over hash chain of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @next: the &struct rhash_head to use as next in loop cursor.
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
* @member: name of the &struct rhash_head within the hashable struct.
|
||||
*
|
||||
* This hash chain list-traversal primitive allows for the looped code to
|
||||
* remove the loop cursor from the list.
|
||||
*/
|
||||
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
|
||||
for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
|
||||
next = !rht_is_a_nulls(pos) ? \
|
||||
rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
|
||||
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
|
||||
pos = next, \
|
||||
next = !rht_is_a_nulls(pos) ? \
|
||||
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
|
||||
|
||||
/**
|
||||
* rht_for_each_rcu_continue - continue iterating over rcu hash chain
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @head: the previous &struct rhash_head to continue from
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
*
|
||||
* This hash chain list-traversal primitive may safely run concurrently with
|
||||
* the _rcu mutation primitives such as rhashtable_insert() as long as the
|
||||
* traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
|
||||
for (({barrier(); }), \
|
||||
pos = rht_dereference_bucket_rcu(head, tbl, hash); \
|
||||
!rht_is_a_nulls(pos); \
|
||||
pos = rcu_dereference_raw(pos->next))
|
||||
|
||||
/**
|
||||
* rht_for_each_rcu - iterate over rcu hash chain
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
*
|
||||
* This hash chain list-traversal primitive may safely run concurrently with
|
||||
* the _rcu mutation primitives such as rhashtable_insert() as long as the
|
||||
* traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define rht_for_each_rcu(pos, tbl, hash) \
|
||||
rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
|
||||
|
||||
/**
|
||||
* rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @head: the previous &struct rhash_head to continue from
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
* @member: name of the &struct rhash_head within the hashable struct.
|
||||
*
|
||||
* This hash chain list-traversal primitive may safely run concurrently with
|
||||
* the _rcu mutation primitives such as rhashtable_insert() as long as the
|
||||
* traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
|
||||
for (({barrier(); }), \
|
||||
pos = rht_dereference_bucket_rcu(head, tbl, hash); \
|
||||
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
|
||||
pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
|
||||
|
||||
/**
|
||||
* rht_for_each_entry_rcu - iterate over rcu hash chain of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct rhash_head to use as a loop cursor.
|
||||
* @tbl: the &struct bucket_table
|
||||
* @hash: the hash value / bucket index
|
||||
* @member: name of the &struct rhash_head within the hashable struct.
|
||||
*
|
||||
* This hash chain list-traversal primitive may safely run concurrently with
|
||||
* the _rcu mutation primitives such as rhashtable_insert() as long as the
|
||||
* traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
|
||||
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
|
||||
tbl, hash, member)
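/*
* Illustrative sketch (added for clarity, not part of the original
* source): walking one RCU protected bucket chain of the "test_obj"
* type used in the rhashtable_init() examples, where "tbl" and "hash"
* are a bucket table and bucket index obtained under rcu_read_lock():
*
*	struct test_obj *obj;
*	struct rhash_head *pos;
*
*	rcu_read_lock();
*	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
*		pr_info("key %d\n", obj->key);
*	rcu_read_unlock();
*/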
|
||||
|
||||
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
|
||||
const void *obj)
|
||||
{
|
||||
struct rhashtable *ht = arg->ht;
|
||||
const char *ptr = obj;
|
||||
|
||||
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_lookup_fast - search hash table, inlined version
|
||||
* @ht: hash table
|
||||
* @key: the pointer to the key
|
||||
* @params: hash table parameters
|
||||
*
|
||||
* Computes the hash value for the key and traverses the bucket chain looking
|
||||
* for an entry with an identical key. The first matching entry is returned.
|
||||
*
|
||||
* Returns the first entry on which the compare function returned true.
|
||||
*/
|
||||
static inline void *rhashtable_lookup_fast(
|
||||
struct rhashtable *ht, const void *key,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
struct rhashtable_compare_arg arg = {
|
||||
.ht = ht,
|
||||
.key = key,
|
||||
};
|
||||
const struct bucket_table *tbl;
|
||||
struct rhash_head *he;
|
||||
unsigned int hash;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
tbl = rht_dereference_rcu(ht->tbl, ht);
|
||||
restart:
|
||||
hash = rht_key_hashfn(ht, tbl, key, params);
|
||||
rht_for_each_rcu(he, tbl, hash) {
|
||||
if (params.obj_cmpfn ?
|
||||
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
|
||||
rhashtable_compare(&arg, rht_obj(ht, he)))
|
||||
continue;
|
||||
rcu_read_unlock();
|
||||
return rht_obj(ht, he);
|
||||
}
|
||||
|
||||
/* Ensure we see any new tables. */
|
||||
smp_rmb();
|
||||
|
||||
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
|
||||
if (unlikely(tbl))
|
||||
goto restart;
|
||||
rcu_read_unlock();
|
||||
|
||||
return NULL;
|
||||
}
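/*
 * Usage sketch (illustrative only, not part of the original header): a caller
 * embeds a struct rhash_head in its own object, describes the key layout once
 * in a const struct rhashtable_params, and then looks entries up by key.  The
 * struct and variable names below are hypothetical.
 *
 *	struct rtw_example_node {
 *		u32 key;
 *		struct rhash_head rhash;
 *	};
 *
 *	static const struct rhashtable_params rtw_example_params = {
 *		.key_len = sizeof(u32),
 *		.key_offset = offsetof(struct rtw_example_node, key),
 *		.head_offset = offsetof(struct rtw_example_node, rhash),
 *	};
 *
 *	struct rtw_example_node *node;
 *	u32 key = 42;
 *
 *	node = rhashtable_lookup_fast(ht, &key, rtw_example_params);
 *	if (node)
 *		RTW_INFO("found key %u\n", node->key);
 */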
|
||||
|
||||
/* Internal function, please use rhashtable_insert_fast() instead */
|
||||
static inline int __rhashtable_insert_fast(
|
||||
struct rhashtable *ht, const void *key, struct rhash_head *obj,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
struct rhashtable_compare_arg arg = {
|
||||
.ht = ht,
|
||||
.key = key,
|
||||
};
|
||||
struct bucket_table *tbl, *new_tbl;
|
||||
struct rhash_head *head;
|
||||
spinlock_t *lock;
|
||||
unsigned int elasticity;
|
||||
unsigned int hash;
|
||||
int err;
|
||||
|
||||
restart:
|
||||
rcu_read_lock();
|
||||
|
||||
tbl = rht_dereference_rcu(ht->tbl, ht);
|
||||
|
||||
/* All insertions must grab the oldest table containing
|
||||
* the hashed bucket that is yet to be rehashed.
|
||||
*/
|
||||
for (;;) {
|
||||
hash = rht_head_hashfn(ht, tbl, obj, params);
|
||||
lock = rht_bucket_lock(tbl, hash);
|
||||
spin_lock_bh(lock);
|
||||
|
||||
if (tbl->rehash <= hash)
|
||||
break;
|
||||
|
||||
spin_unlock_bh(lock);
|
||||
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
|
||||
}
|
||||
|
||||
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
|
||||
if (unlikely(new_tbl)) {
|
||||
tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
|
||||
if (!IS_ERR_OR_NULL(tbl))
|
||||
goto slow_path;
|
||||
|
||||
err = PTR_ERR(tbl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = -E2BIG;
|
||||
if (unlikely(rht_grow_above_max(ht, tbl)))
|
||||
goto out;
|
||||
|
||||
if (unlikely(rht_grow_above_100(ht, tbl))) {
|
||||
slow_path:
|
||||
spin_unlock_bh(lock);
|
||||
err = rhashtable_insert_rehash(ht, tbl);
|
||||
rcu_read_unlock();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
goto restart;
|
||||
}
|
||||
|
||||
err = -EEXIST;
|
||||
elasticity = ht->elasticity;
|
||||
rht_for_each(head, tbl, hash) {
|
||||
if (key &&
|
||||
unlikely(!(params.obj_cmpfn ?
|
||||
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
|
||||
rhashtable_compare(&arg, rht_obj(ht, head)))))
|
||||
goto out;
|
||||
if (!--elasticity)
|
||||
goto slow_path;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
|
||||
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
|
||||
|
||||
RCU_INIT_POINTER(obj->next, head);
|
||||
|
||||
rcu_assign_pointer(tbl->buckets[hash], obj);
|
||||
|
||||
atomic_inc(&ht->nelems);
|
||||
if (rht_grow_above_75(ht, tbl))
|
||||
schedule_work(&ht->run_work);
|
||||
|
||||
out:
|
||||
spin_unlock_bh(lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_insert_fast - insert object into hash table
|
||||
* @ht: hash table
|
||||
* @obj: pointer to hash head inside object
|
||||
* @params: hash table parameters
|
||||
*
|
||||
* Will take a per bucket spinlock to protect against mutual mutations
|
||||
* on the same bucket. Multiple insertions may occur in parallel unless
|
||||
* they map to the same bucket lock.
|
||||
*
|
||||
* It is safe to call this function from atomic context.
|
||||
*
|
||||
* Will trigger an automatic deferred table resizing if the size grows
|
||||
* beyond the watermark indicated by grow_decision() which can be passed
|
||||
* to rhashtable_init().
|
||||
*/
|
||||
static inline int rhashtable_insert_fast(
|
||||
struct rhashtable *ht, struct rhash_head *obj,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
return __rhashtable_insert_fast(ht, NULL, obj, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_lookup_insert_fast - lookup and insert object into hash table
|
||||
* @ht: hash table
|
||||
* @obj: pointer to hash head inside object
|
||||
* @params: hash table parameters
|
||||
*
|
||||
* Locks down the bucket chain in both the old and new table if a resize
|
||||
* is in progress to ensure that writers can't remove from the old table
|
||||
* and can't insert to the new table during the atomic operation of search
|
||||
* and insertion. Searches for duplicates in both the old and new table if
|
||||
* a resize is in progress.
|
||||
*
|
||||
* This lookup function may only be used for fixed key hash table (key_len
|
||||
* parameter set). It will BUG() if used inappropriately.
|
||||
*
|
||||
* It is safe to call this function from atomic context.
|
||||
*
|
||||
* Will trigger an automatic deferred table resizing if the size grows
|
||||
* beyond the watermark indicated by grow_decision() which can be passed
|
||||
* to rhashtable_init().
|
||||
*/
|
||||
static inline int rhashtable_lookup_insert_fast(
|
||||
struct rhashtable *ht, struct rhash_head *obj,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
const char *key = rht_obj(ht, obj);
|
||||
|
||||
BUG_ON(ht->p.obj_hashfn);
|
||||
|
||||
return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
|
||||
params);
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_lookup_insert_key - search and insert object to hash table
|
||||
* with explicit key
|
||||
* @ht: hash table
|
||||
* @key: key
|
||||
* @obj: pointer to hash head inside object
|
||||
* @params: hash table parameters
|
||||
*
|
||||
* Locks down the bucket chain in both the old and new table if a resize
|
||||
* is in progress to ensure that writers can't remove from the old table
|
||||
* and can't insert to the new table during the atomic operation of search
|
||||
* and insertion. Searches for duplicates in both the old and new table if
|
||||
* a resize is in progress.
|
||||
*
|
||||
* Lookups may occur in parallel with hashtable mutations and resizing.
|
||||
*
|
||||
* Will trigger an automatic deferred table resizing if the size grows
|
||||
* beyond the watermark indicated by grow_decision() which can be passed
|
||||
* to rhashtable_init().
|
||||
*
|
||||
* Returns zero on success.
|
||||
*/
|
||||
static inline int rhashtable_lookup_insert_key(
|
||||
struct rhashtable *ht, const void *key, struct rhash_head *obj,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
BUG_ON(!ht->p.obj_hashfn || !key);
|
||||
|
||||
return __rhashtable_insert_fast(ht, key, obj, params);
|
||||
}
|
||||
|
||||
/* Internal function, please use rhashtable_remove_fast() instead */
|
||||
static inline int __rhashtable_remove_fast(
|
||||
struct rhashtable *ht, struct bucket_table *tbl,
|
||||
struct rhash_head *obj, const struct rhashtable_params params)
|
||||
{
|
||||
struct rhash_head __rcu **pprev;
|
||||
struct rhash_head *he;
|
||||
spinlock_t *lock;
|
||||
unsigned int hash;
|
||||
int err = -ENOENT;
|
||||
|
||||
hash = rht_head_hashfn(ht, tbl, obj, params);
|
||||
lock = rht_bucket_lock(tbl, hash);
|
||||
|
||||
spin_lock_bh(lock);
|
||||
|
||||
pprev = &tbl->buckets[hash];
|
||||
rht_for_each(he, tbl, hash) {
|
||||
if (he != obj) {
|
||||
pprev = &he->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
rcu_assign_pointer(*pprev, obj->next);
|
||||
err = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
spin_unlock_bh(lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* rhashtable_remove_fast - remove object from hash table
|
||||
* @ht: hash table
|
||||
* @obj: pointer to hash head inside object
|
||||
* @params: hash table parameters
|
||||
*
|
||||
* Since the hash chain is single linked, the removal operation needs to
|
||||
* walk the bucket chain upon removal. The removal operation is thus
|
||||
* considerably slow if the hash table is not correctly sized.
|
||||
*
|
||||
* Will automatically shrink the table via rhashtable_shrink() if the
|
||||
* shrink_decision function specified at rhashtable_init() returns true.
|
||||
*
|
||||
* Returns zero on success, -ENOENT if the entry could not be found.
|
||||
*/
|
||||
static inline int rhashtable_remove_fast(
|
||||
struct rhashtable *ht, struct rhash_head *obj,
|
||||
const struct rhashtable_params params)
|
||||
{
|
||||
struct bucket_table *tbl;
|
||||
int err;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
tbl = rht_dereference_rcu(ht->tbl, ht);
|
||||
|
||||
/* Because we have already taken (and released) the bucket
|
||||
* lock in old_tbl, if we find that future_tbl is not yet
|
||||
* visible then that guarantees the entry to still be in
|
||||
* the old tbl if it exists.
|
||||
*/
|
||||
while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
|
||||
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
|
||||
;
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
atomic_dec(&ht->nelems);
|
||||
if (unlikely(ht->p.automatic_shrinking &&
|
||||
rht_shrink_below_30(ht, tbl)))
|
||||
schedule_work(&ht->run_work);
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
|
||||
return err;
|
||||
}
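/*
 * Insertion/removal sketch (illustrative only), reusing the hypothetical
 * rtw_example_node and rtw_example_params from the lookup example above.
 * Both calls locate the rhash_head via params.head_offset, so the same
 * params must be used for insert, lookup and remove.
 *
 *	struct rtw_example_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int err;
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->key = 42;
 *
 *	err = rhashtable_insert_fast(ht, &node->rhash, rtw_example_params);
 *	if (err) {
 *		kfree(node);
 *		return err;
 *	}
 *
 *	... later ...
 *
 *	if (rhashtable_remove_fast(ht, &node->rhash, rtw_example_params) == 0)
 *		kfree(node);
 */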
|
||||
|
||||
#endif /* _LINUX_RHASHTABLE_H */
|
||||
|
||||
1324
os_dep/linux/rtw_android.c
Normal file
File diff suppressed because it is too large
1566
os_dep/linux/rtw_cfgvendor.c
Normal file
File diff suppressed because it is too large
559
os_dep/linux/rtw_cfgvendor.h
Normal file
@@ -0,0 +1,559 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2007 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#ifndef _RTW_CFGVENDOR_H_
|
||||
#define _RTW_CFGVENDOR_H_
|
||||
|
||||
#define OUI_GOOGLE 0x001A11
|
||||
#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
|
||||
#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN
|
||||
#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN
|
||||
#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
|
||||
|
||||
#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
|
||||
#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN)
|
||||
#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN
|
||||
#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN
|
||||
#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN
|
||||
#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN)
|
||||
#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \
|
||||
SCAN_FLAGS_HDR_LEN + \
|
||||
GSCAN_NUM_RESULTS_HDR_LEN + \
|
||||
GSCAN_RESULTS_HDR_LEN)
|
||||
|
||||
#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \
|
||||
VENDOR_SUBCMD_OVERHEAD + \
|
||||
VENDOR_DATA_OVERHEAD)
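/*
 * Sizing sketch (illustrative only): the overhead macros above are added to
 * the payload size when allocating a vendor command reply.  For example, a
 * reply carrying a single u32 attribute could be built roughly like this
 * (the attribute id is taken from enum andr_wifi_feature_set_attr below):
 *
 *	struct sk_buff *skb;
 *	int len = VENDOR_REPLY_OVERHEAD + ATTRIBUTE_U32_LEN;
 *
 *	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
 *	if (!skb)
 *		return -ENOMEM;
 *	nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, 1);
 *	return cfg80211_vendor_cmd_reply(skb);
 */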
|
||||
typedef enum {
|
||||
/* don't use 0 as a valid subcommand */
|
||||
VENDOR_NL80211_SUBCMD_UNSPECIFIED,
|
||||
|
||||
/* define all vendor startup commands between 0x0 and 0x0FFF */
|
||||
VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
|
||||
VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF,
|
||||
|
||||
/* define all GScan related commands between 0x1000 and 0x10FF */
|
||||
ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
|
||||
ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF,
|
||||
|
||||
/* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */
|
||||
ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100,
|
||||
ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF,
|
||||
|
||||
/* define all RTT related commands between 0x1100 and 0x11FF */
|
||||
ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
|
||||
ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF,
|
||||
|
||||
ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
|
||||
ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF,
|
||||
|
||||
/* define all Logger related commands between 0x1400 and 0x14FF */
|
||||
ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START = 0x1400,
|
||||
ANDROID_NL80211_SUBCMD_DEBUG_RANGE_END = 0x14FF,
|
||||
|
||||
/* define all wifi offload related commands between 0x1600 and 0x16FF */
|
||||
ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600,
|
||||
ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF,
|
||||
|
||||
/* define all NAN related commands between 0x1700 and 0x17FF */
|
||||
ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700,
|
||||
ANDROID_NL80211_SUBCMD_NAN_RANGE_END = 0x17FF,
|
||||
|
||||
/* define all Android Packet Filter related commands between 0x1800 and 0x18FF */
|
||||
ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800,
|
||||
ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END = 0x18FF,
|
||||
|
||||
/* This is reserved for future usage */
|
||||
|
||||
} ANDROID_VENDOR_SUB_COMMAND;
|
||||
|
||||
enum rtw_vendor_subcmd {
|
||||
GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
|
||||
|
||||
GSCAN_SUBCMD_SET_CONFIG, /* 0x1001 */
|
||||
|
||||
GSCAN_SUBCMD_SET_SCAN_CONFIG, /* 0x1002 */
|
||||
GSCAN_SUBCMD_ENABLE_GSCAN, /* 0x1003 */
|
||||
GSCAN_SUBCMD_GET_SCAN_RESULTS, /* 0x1004 */
|
||||
GSCAN_SUBCMD_SCAN_RESULTS, /* 0x1005 */
|
||||
|
||||
GSCAN_SUBCMD_SET_HOTLIST, /* 0x1006 */
|
||||
|
||||
GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, /* 0x1007 */
|
||||
GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, /* 0x1008 */
|
||||
GSCAN_SUBCMD_GET_CHANNEL_LIST, /* 0x1009 */
|
||||
|
||||
WIFI_SUBCMD_GET_FEATURE_SET, /* 0x100A */
|
||||
WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, /* 0x100B */
|
||||
WIFI_SUBCMD_SET_PNO_RANDOM_MAC_OUI, /* 0x100C */
|
||||
WIFI_SUBCMD_NODFS_SET, /* 0x100D */
|
||||
WIFI_SUBCMD_SET_COUNTRY_CODE, /* 0x100E */
|
||||
/* Add more sub commands here */
|
||||
GSCAN_SUBCMD_SET_EPNO_SSID, /* 0x100F */
|
||||
|
||||
WIFI_SUBCMD_SET_SSID_WHITE_LIST, /* 0x1010 */
|
||||
WIFI_SUBCMD_SET_ROAM_PARAMS, /* 0x1011 */
|
||||
WIFI_SUBCMD_ENABLE_LAZY_ROAM, /* 0x1012 */
|
||||
WIFI_SUBCMD_SET_BSSID_PREF, /* 0x1013 */
|
||||
WIFI_SUBCMD_SET_BSSID_BLACKLIST, /* 0x1014 */
|
||||
|
||||
GSCAN_SUBCMD_ANQPO_CONFIG, /* 0x1015 */
|
||||
WIFI_SUBCMD_SET_RSSI_MONITOR, /* 0x1016 */
|
||||
WIFI_SUBCMD_CONFIG_ND_OFFLOAD, /* 0x1017 */
|
||||
/* Add more sub commands here */
|
||||
|
||||
GSCAN_SUBCMD_MAX,
|
||||
|
||||
RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
|
||||
RTT_SUBCMD_CANCEL_CONFIG,
|
||||
RTT_SUBCMD_GETCAPABILITY,
|
||||
|
||||
APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
|
||||
APF_SUBCMD_SET_FILTER,
|
||||
};
|
||||
|
||||
enum gscan_attributes {
|
||||
GSCAN_ATTRIBUTE_NUM_BUCKETS = 10,
|
||||
GSCAN_ATTRIBUTE_BASE_PERIOD,
|
||||
GSCAN_ATTRIBUTE_BUCKETS_BAND,
|
||||
GSCAN_ATTRIBUTE_BUCKET_ID,
|
||||
GSCAN_ATTRIBUTE_BUCKET_PERIOD,
|
||||
GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
|
||||
GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
|
||||
GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
|
||||
GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
|
||||
GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
|
||||
GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
|
||||
|
||||
GSCAN_ATTRIBUTE_ENABLE_FEATURE = 20,
|
||||
GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
|
||||
GSCAN_ATTRIBUTE_FLUSH_FEATURE,
|
||||
GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS,
|
||||
GSCAN_ATTRIBUTE_REPORT_EVENTS,
|
||||
/* remaining reserved for additional attributes */
|
||||
GSCAN_ATTRIBUTE_NUM_OF_RESULTS = 30,
|
||||
GSCAN_ATTRIBUTE_FLUSH_RESULTS,
|
||||
GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */
|
||||
GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */
|
||||
GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */
|
||||
GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */
|
||||
GSCAN_ATTRIBUTE_NUM_CHANNELS,
|
||||
GSCAN_ATTRIBUTE_CHANNEL_LIST,
|
||||
|
||||
/* remaining reserved for additional attributes */
|
||||
|
||||
GSCAN_ATTRIBUTE_SSID = 40,
|
||||
GSCAN_ATTRIBUTE_BSSID,
|
||||
GSCAN_ATTRIBUTE_CHANNEL,
|
||||
GSCAN_ATTRIBUTE_RSSI,
|
||||
GSCAN_ATTRIBUTE_TIMESTAMP,
|
||||
GSCAN_ATTRIBUTE_RTT,
|
||||
GSCAN_ATTRIBUTE_RTTSD,
|
||||
|
||||
/* remaining reserved for additional attributes */
|
||||
|
||||
GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = 50,
|
||||
GSCAN_ATTRIBUTE_RSSI_LOW,
|
||||
GSCAN_ATTRIBUTE_RSSI_HIGH,
|
||||
GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
|
||||
GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
|
||||
|
||||
/* remaining reserved for additional attributes */
|
||||
GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = 60,
|
||||
GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
|
||||
GSCAN_ATTRIBUTE_MIN_BREACHING,
|
||||
GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
|
||||
GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH,
|
||||
GSCAN_ATTRIBUTE_MAX
|
||||
};
|
||||
|
||||
enum gscan_bucket_attributes {
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_1,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_2,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_3,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_4,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_5,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_6,
|
||||
GSCAN_ATTRIBUTE_CH_BUCKET_7
|
||||
};
|
||||
|
||||
enum gscan_ch_attributes {
|
||||
GSCAN_ATTRIBUTE_CH_ID_1,
|
||||
GSCAN_ATTRIBUTE_CH_ID_2,
|
||||
GSCAN_ATTRIBUTE_CH_ID_3,
|
||||
GSCAN_ATTRIBUTE_CH_ID_4,
|
||||
GSCAN_ATTRIBUTE_CH_ID_5,
|
||||
GSCAN_ATTRIBUTE_CH_ID_6,
|
||||
GSCAN_ATTRIBUTE_CH_ID_7
|
||||
};
|
||||
|
||||
enum rtt_attributes {
|
||||
RTT_ATTRIBUTE_TARGET_CNT,
|
||||
RTT_ATTRIBUTE_TARGET_INFO,
|
||||
RTT_ATTRIBUTE_TARGET_MAC,
|
||||
RTT_ATTRIBUTE_TARGET_TYPE,
|
||||
RTT_ATTRIBUTE_TARGET_PEER,
|
||||
RTT_ATTRIBUTE_TARGET_CHAN,
|
||||
RTT_ATTRIBUTE_TARGET_MODE,
|
||||
RTT_ATTRIBUTE_TARGET_INTERVAL,
|
||||
RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT,
|
||||
RTT_ATTRIBUTE_TARGET_NUM_PKT,
|
||||
RTT_ATTRIBUTE_TARGET_NUM_RETRY
|
||||
};
|
||||
|
||||
typedef enum rtw_vendor_event {
|
||||
RTK_RESERVED1,
|
||||
RTK_RESERVED2,
|
||||
GSCAN_EVENT_SIGNIFICANT_CHANGE_RESULTS,
|
||||
GSCAN_EVENT_HOTLIST_RESULTS_FOUND,
|
||||
GSCAN_EVENT_SCAN_RESULTS_AVAILABLE,
|
||||
GSCAN_EVENT_FULL_SCAN_RESULTS,
|
||||
RTT_EVENT_COMPLETE,
|
||||
GSCAN_EVENT_COMPLETE_SCAN,
|
||||
GSCAN_EVENT_HOTLIST_RESULTS_LOST,
|
||||
GSCAN_EVENT_EPNO_EVENT,
|
||||
GOOGLE_DEBUG_RING_EVENT,
|
||||
GOOGLE_DEBUG_MEM_DUMP_EVENT,
|
||||
GSCAN_EVENT_ANQPO_HOTSPOT_MATCH,
|
||||
GOOGLE_RSSI_MONITOR_EVENT
|
||||
} rtw_vendor_event_t;
|
||||
|
||||
enum andr_wifi_feature_set_attr {
|
||||
ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
|
||||
ANDR_WIFI_ATTRIBUTE_FEATURE_SET
|
||||
};
|
||||
|
||||
typedef enum rtw_vendor_gscan_attribute {
|
||||
ATTR_START_GSCAN,
|
||||
ATTR_STOP_GSCAN,
|
||||
ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */
|
||||
ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */
|
||||
ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */
|
||||
ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */
|
||||
ATTR_GET_GSCAN_CAPABILITIES_ID,
|
||||
/* Add more sub commands here */
|
||||
ATTR_GSCAN_MAX
|
||||
} rtw_vendor_gscan_attribute_t;
|
||||
|
||||
typedef enum gscan_batch_attribute {
|
||||
ATTR_GSCAN_BATCH_BESTN,
|
||||
ATTR_GSCAN_BATCH_MSCAN,
|
||||
ATTR_GSCAN_BATCH_BUFFER_THRESHOLD
|
||||
} gscan_batch_attribute_t;
|
||||
|
||||
typedef enum gscan_geofence_attribute {
|
||||
ATTR_GSCAN_NUM_HOTLIST_BSSID,
|
||||
ATTR_GSCAN_HOTLIST_BSSID
|
||||
} gscan_geofence_attribute_t;
|
||||
|
||||
typedef enum gscan_complete_event {
|
||||
WIFI_SCAN_BUFFER_FULL,
|
||||
WIFI_SCAN_COMPLETE
|
||||
} gscan_complete_event_t;
|
||||
/* wifi_hal.h */
|
||||
/* WiFi Common definitions */
|
||||
typedef unsigned char byte;
|
||||
typedef int wifi_request_id;
|
||||
typedef int wifi_channel; // indicates channel frequency in MHz
|
||||
typedef int wifi_rssi;
|
||||
typedef byte mac_addr[6];
|
||||
typedef byte oui[3];
|
||||
typedef int64_t wifi_timestamp; // In microseconds (us)
|
||||
typedef int64_t wifi_timespan; // In picoseconds (ps)
|
||||
|
||||
struct wifi_info;
|
||||
struct wifi_interface_info;
|
||||
typedef struct wifi_info *wifi_handle;
|
||||
typedef struct wifi_interface_info *wifi_interface_handle;
|
||||
|
||||
/* channel operating width */
|
||||
typedef enum {
|
||||
WIFI_CHAN_WIDTH_20 = 0,
|
||||
WIFI_CHAN_WIDTH_40 = 1,
|
||||
WIFI_CHAN_WIDTH_80 = 2,
|
||||
WIFI_CHAN_WIDTH_160 = 3,
|
||||
WIFI_CHAN_WIDTH_80P80 = 4,
|
||||
WIFI_CHAN_WIDTH_5 = 5,
|
||||
WIFI_CHAN_WIDTH_10 = 6,
|
||||
WIFI_CHAN_WIDTH_INVALID = -1
|
||||
} wifi_channel_width;
|
||||
|
||||
typedef int wifi_radio;
|
||||
|
||||
typedef struct {
|
||||
wifi_channel_width width;
|
||||
int center_frequency0;
|
||||
int center_frequency1;
|
||||
int primary_frequency;
|
||||
} wifi_channel_spec;
|
||||
|
||||
typedef enum {
|
||||
WIFI_SUCCESS = 0,
|
||||
WIFI_ERROR_NONE = 0,
|
||||
WIFI_ERROR_UNKNOWN = -1,
|
||||
WIFI_ERROR_UNINITIALIZED = -2,
|
||||
WIFI_ERROR_NOT_SUPPORTED = -3,
|
||||
WIFI_ERROR_NOT_AVAILABLE = -4, // Not available right now, but try later
|
||||
WIFI_ERROR_INVALID_ARGS = -5,
|
||||
WIFI_ERROR_INVALID_REQUEST_ID = -6,
|
||||
WIFI_ERROR_TIMED_OUT = -7,
|
||||
WIFI_ERROR_TOO_MANY_REQUESTS = -8, // Too many instances of this request
|
||||
WIFI_ERROR_OUT_OF_MEMORY = -9,
|
||||
WIFI_ERROR_BUSY = -10,
|
||||
} wifi_error;
|
||||
|
||||
#ifdef CONFIG_RTW_CFGVEDNOR_LLSTATS
|
||||
#define STATS_MAJOR_VERSION 1
|
||||
#define STATS_MINOR_VERSION 0
|
||||
#define STATS_MICRO_VERSION 0
|
||||
|
||||
typedef enum {
|
||||
WIFI_DISCONNECTED = 0,
|
||||
WIFI_AUTHENTICATING = 1,
|
||||
WIFI_ASSOCIATING = 2,
|
||||
WIFI_ASSOCIATED = 3,
|
||||
WIFI_EAPOL_STARTED = 4, // if done by firmware/driver
|
||||
WIFI_EAPOL_COMPLETED = 5, // if done by firmware/driver
|
||||
} wifi_connection_state;
|
||||
|
||||
typedef enum {
|
||||
WIFI_ROAMING_IDLE = 0,
|
||||
WIFI_ROAMING_ACTIVE = 1,
|
||||
} wifi_roam_state;
|
||||
|
||||
typedef enum {
|
||||
WIFI_INTERFACE_STA = 0,
|
||||
WIFI_INTERFACE_SOFTAP = 1,
|
||||
WIFI_INTERFACE_IBSS = 2,
|
||||
WIFI_INTERFACE_P2P_CLIENT = 3,
|
||||
WIFI_INTERFACE_P2P_GO = 4,
|
||||
WIFI_INTERFACE_NAN = 5,
|
||||
WIFI_INTERFACE_MESH = 6,
|
||||
WIFI_INTERFACE_UNKNOWN = -1
|
||||
} wifi_interface_mode;
|
||||
|
||||
#define WIFI_CAPABILITY_QOS 0x00000001 // set for QOS association
|
||||
#define WIFI_CAPABILITY_PROTECTED 0x00000002 // set for protected association (802.11 beacon frame control protected bit set)
|
||||
#define WIFI_CAPABILITY_INTERWORKING 0x00000004 // set if 802.11 Extended Capabilities element interworking bit is set
|
||||
#define WIFI_CAPABILITY_HS20 0x00000008 // set for HS20 association
|
||||
#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 // set if 802.11 Extended Capabilities element UTF-8 SSID bit is set
|
||||
#define WIFI_CAPABILITY_COUNTRY 0x00000020 // set if 802.11 Country Element is present
|
||||
|
||||
typedef struct {
|
||||
wifi_interface_mode mode; // interface mode
|
||||
u8 mac_addr[6]; // interface mac address (self)
|
||||
wifi_connection_state state; // connection state (valid for STA, CLI only)
|
||||
wifi_roam_state roaming; // roaming state
|
||||
u32 capabilities; // WIFI_CAPABILITY_XXX (self)
|
||||
u8 ssid[33]; // null terminated SSID
|
||||
u8 bssid[6]; // bssid
|
||||
u8 ap_country_str[3]; // country string advertised by AP
|
||||
u8 country_str[3]; // country string for this association
|
||||
} wifi_interface_link_layer_info;
|
||||
|
||||
/* channel information */
|
||||
typedef struct {
|
||||
wifi_channel_width width; // channel width (20, 40, 80, 80+80, 160)
|
||||
wifi_channel center_freq; // primary 20 MHz channel
|
||||
wifi_channel center_freq0; // center frequency (MHz) first segment
|
||||
wifi_channel center_freq1; // center frequency (MHz) second segment
|
||||
} wifi_channel_info;
|
||||
|
||||
/* wifi rate */
|
||||
typedef struct {
|
||||
u32 preamble :3; // 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved
|
||||
u32 nss :2; // 0:1x1, 1:2x2, 2:3x3, 3:4x4
|
||||
u32 bw :3; // 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz
|
||||
u32 rateMcsIdx :8; // OFDM/CCK rate code would be as per ieee std in the units of 0.5mbps
|
||||
// HT/VHT it would be mcs index
|
||||
u32 reserved :16; // reserved
|
||||
u32 bitrate; // units of 100 Kbps
|
||||
} wifi_rate;
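// Worked example (illustrative only, not part of the original header): a VHT
// MCS7 transmission over 2 spatial streams on an 80 MHz channel (~585 Mbps,
// long GI) would be encoded roughly as:
//	wifi_rate r = {
//		.preamble   = 3,	// VHT
//		.nss        = 1,	// 2x2, per the encoding above
//		.bw         = 2,	// 80 MHz
//		.rateMcsIdx = 7,	// VHT MCS index
//		.bitrate    = 5850,	// units of 100 Kbps
//	};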
|
||||
|
||||
/* channel statistics */
|
||||
typedef struct {
|
||||
wifi_channel_info channel; // channel
|
||||
u32 on_time; // msecs the radio is awake (32 bits number accruing over time)
|
||||
u32 cca_busy_time; // msecs the CCA register is busy (32 bits number accruing over time)
|
||||
} wifi_channel_stat;
|
||||
|
||||
// Max number of tx power levels. The actual number varies per device and is specified by |num_tx_levels|
|
||||
#define RADIO_STAT_MAX_TX_LEVELS 256
|
||||
|
||||
/* radio statistics */
|
||||
typedef struct {
|
||||
wifi_radio radio; // wifi radio (if multiple radio supported)
|
||||
u32 on_time; // msecs the radio is awake (32 bits number accruing over time)
|
||||
u32 tx_time; // msecs the radio is transmitting (32 bits number accruing over time)
|
||||
u32 num_tx_levels; // number of radio transmit power levels
|
||||
u32* tx_time_per_levels; // pointer to an array of radio transmit per power levels in
|
||||
// msecs accrued over time
|
||||
u32 rx_time; // msecs the radio is in active receive (32 bits number accruing over time)
|
||||
u32 on_time_scan; // msecs the radio is awake due to all scan (32 bits number accruing over time)
|
||||
u32 on_time_nbd; // msecs the radio is awake due to NAN (32 bits number accruing over time)
|
||||
u32 on_time_gscan; // msecs the radio is awake due to GScan (32 bits number accruing over time)
|
||||
u32 on_time_roam_scan; // msecs the radio is awake due to roam scan (32 bits number accruing over time)
|
||||
u32 on_time_pno_scan; // msecs the radio is awake due to PNO scan (32 bits number accruing over time)
|
||||
u32 on_time_hs20; // msecs the radio is awake due to HS2.0 scans and GAS exchange (32 bits number accruing over time)
|
||||
u32 num_channels; // number of channels
|
||||
wifi_channel_stat channels[]; // channel statistics
|
||||
} wifi_radio_stat;
|
||||
|
||||
/**
|
||||
* Packet statistics reporting by firmware is performed on an MPDU basis (i.e. counters increase by 1 for each MPDU)
|
||||
* Also, "data packet" in the associated comments shall be interpreted as an 802.11 data packet,
|
||||
* that is, 802.11 frame control subtype == 2 and excluding management and control frames.
|
||||
*
|
||||
* As an example, in the case of transmission of an MSDU fragmented in 16 MPDUs which are transmitted
|
||||
* OTA in a 16-unit-long a-mpdu, for which a block ack is received with 5 bits set:
|
||||
* tx_mpdu : shall increase by 5
|
||||
* retries : shall increase by 16
|
||||
* tx_ampdu : shall increase by 1
|
||||
* data packet counters shall not increase regardless of the number of BAR potentially sent by device for this a-mpdu
|
||||
* data packet counters shall not increase regardless of the number of BA received by device for this a-mpdu
|
||||
*
|
||||
* For each subsequent retransmission of the 11 remaining non ACK'ed mpdus
|
||||
* (regardless of the fact that they are transmitted in a-mpdu or not)
|
||||
* retries : shall increase by 1
|
||||
*
|
||||
* If no subsequent BA or ACK is received from the AP until the packet lifetime expires for those 11 packets that were not ACK'ed
|
||||
* mpdu_lost : shall increase by 11
|
||||
*/
|
||||
|
||||
/* per rate statistics */
|
||||
typedef struct {
|
||||
wifi_rate rate; // rate information
|
||||
u32 tx_mpdu; // number of successfully transmitted data pkts (ACK rcvd)
|
||||
u32 rx_mpdu; // number of received data pkts
|
||||
u32 mpdu_lost; // number of data packet losses (no ACK)
|
||||
u32 retries; // total number of data pkt retries
|
||||
u32 retries_short; // number of short data pkt retries
|
||||
u32 retries_long; // number of long data pkt retries
|
||||
} wifi_rate_stat;
|
||||
|
||||
/* access categories */
|
||||
typedef enum {
|
||||
WIFI_AC_VO = 0,
|
||||
WIFI_AC_VI = 1,
|
||||
WIFI_AC_BE = 2,
|
||||
WIFI_AC_BK = 3,
|
||||
WIFI_AC_MAX = 4,
|
||||
} wifi_traffic_ac;
|
||||
|
||||
/* wifi peer type */
|
||||
typedef enum
|
||||
{
|
||||
WIFI_PEER_STA,
|
||||
WIFI_PEER_AP,
|
||||
WIFI_PEER_P2P_GO,
|
||||
WIFI_PEER_P2P_CLIENT,
|
||||
WIFI_PEER_NAN,
|
||||
WIFI_PEER_TDLS,
|
||||
WIFI_PEER_INVALID,
|
||||
} wifi_peer_type;
|
||||
|
||||
/* per peer statistics */
|
||||
typedef struct {
|
||||
wifi_peer_type type; // peer type (AP, TDLS, GO etc.)
|
||||
u8 peer_mac_address[6]; // mac address
|
||||
u32 capabilities; // peer WIFI_CAPABILITY_XXX
|
||||
u32 num_rate; // number of rates
|
||||
wifi_rate_stat rate_stats[]; // per rate statistics, number of entries = num_rate
|
||||
} wifi_peer_info;
|
||||
|
||||
/* Per access category statistics */
|
||||
typedef struct {
|
||||
wifi_traffic_ac ac; // access category (VI, VO, BE, BK)
|
||||
u32 tx_mpdu; // number of successfully transmitted unicast data pkts (ACK rcvd)
|
||||
u32 rx_mpdu; // number of received unicast data packets
|
||||
u32 tx_mcast; // number of successfully transmitted multicast data packets
|
||||
// STA case: implies ACK received from AP for the unicast packet in which mcast pkt was sent
|
||||
u32 rx_mcast; // number of received multicast data packets
|
||||
u32 rx_ampdu; // number of received unicast a-mpdus; support of this counter is optional
|
||||
u32 tx_ampdu; // number of transmitted unicast a-mpdus; support of this counter is optional
|
||||
u32 mpdu_lost; // number of data pkt losses (no ACK)
|
||||
u32 retries; // total number of data pkt retries
|
||||
u32 retries_short; // number of short data pkt retries
|
||||
u32 retries_long; // number of long data pkt retries
|
||||
u32 contention_time_min; // data pkt min contention time (usecs)
|
||||
u32 contention_time_max; // data pkt max contention time (usecs)
|
||||
u32 contention_time_avg; // data pkt avg contention time (usecs)
|
||||
u32 contention_num_samples; // num of data pkts used for contention statistics
|
||||
} wifi_wmm_ac_stat;
|
||||
|
||||
/* interface statistics */
|
||||
typedef struct {
|
||||
wifi_interface_handle iface; // wifi interface
|
||||
wifi_interface_link_layer_info info; // current state of the interface
|
||||
u32 beacon_rx; // access point beacon received count from connected AP
|
||||
u64 average_tsf_offset; // average beacon offset encountered (beacon_TSF - TBTT)
|
||||
// The average_tsf_offset field is used to calculate the
|
||||
// typical beacon contention time on the channel, and may also be
|
||||
// used to debug beacon synchronization and related power consumption issues
|
||||
u32 leaky_ap_detected; // indicate that this AP typically leaks packets beyond the driver guard time.
|
||||
u32 leaky_ap_avg_num_frames_leaked; // average number of frame leaked by AP after frame with PM bit set was ACK'ed by AP
|
||||
u32 leaky_ap_guard_time; // guard time currently in force (when implementing IEEE power management based on the
|
||||
// frame control PM bit): how long the driver waits before shutting down the radio
|
||||
// after receiving an ACK for a data frame with the PM bit set
|
||||
u32 mgmt_rx; // access point mgmt frames received count from connected AP (including Beacon)
|
||||
u32 mgmt_action_rx; // action frames received count
|
||||
u32 mgmt_action_tx; // action frames transmit count
|
||||
wifi_rssi rssi_mgmt; // access Point Beacon and Management frames RSSI (averaged)
|
||||
wifi_rssi rssi_data; // access Point Data Frames RSSI (averaged) from connected AP
|
||||
wifi_rssi rssi_ack; // access Point ACK RSSI (averaged) from connected AP
|
||||
wifi_wmm_ac_stat ac[WIFI_AC_MAX]; // per ac data packet statistics
|
||||
u32 num_peers; // number of peers
|
||||
wifi_peer_info peer_info[]; // per peer statistics
|
||||
} wifi_iface_stat;
|
||||
|
||||
/* configuration params */
|
||||
typedef struct {
|
||||
u32 mpdu_size_threshold; // threshold to classify the pkts as short or long
|
||||
// packet size < mpdu_size_threshold => short
|
||||
u32 aggressive_statistics_gathering; // set for field debug mode. Driver should collect all statistics regardless of performance impact.
|
||||
} wifi_link_layer_params;
|
||||
|
||||
/* callback for reporting link layer stats */
|
||||
typedef struct {
|
||||
void (*on_link_stats_results) (wifi_request_id id, wifi_iface_stat *iface_stat,
|
||||
int num_radios, wifi_radio_stat *radio_stat);
|
||||
} wifi_stats_result_handler;
|
||||
|
||||
|
||||
/* wifi statistics bitmap */
|
||||
#define WIFI_STATS_RADIO 0x00000001 // all radio statistics
|
||||
#define WIFI_STATS_RADIO_CCA 0x00000002 // cca_busy_time (within radio statistics)
|
||||
#define WIFI_STATS_RADIO_CHANNELS 0x00000004 // all channel statistics (within radio statistics)
|
||||
#define WIFI_STATS_RADIO_SCAN 0x00000008 // all scan statistics (within radio statistics)
|
||||
#define WIFI_STATS_IFACE 0x00000010 // all interface statistics
|
||||
#define WIFI_STATS_IFACE_TXRATE 0x00000020 // all tx rate statistics (within interface statistics)
|
||||
#define WIFI_STATS_IFACE_AC 0x00000040 // all ac statistics (within interface statistics)
|
||||
#define WIFI_STATS_IFACE_CONTENTION 0x00000080 // all contention (min, max, avg) statistics (within ac statisctics)
|
||||
|
||||
#endif /* CONFIG_RTW_CFGVEDNOR_LLSTATS */
|
||||
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(RTW_VENDOR_EXT_SUPPORT)
|
||||
extern int rtw_cfgvendor_attach(struct wiphy *wiphy);
|
||||
extern int rtw_cfgvendor_detach(struct wiphy *wiphy);
|
||||
extern int rtw_cfgvendor_send_async_event(struct wiphy *wiphy,
|
||||
struct net_device *dev, int event_id, const void *data, int len);
|
||||
#if defined(GSCAN_SUPPORT) && 0
|
||||
extern int rtw_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
|
||||
struct net_device *dev, void *data, int len, rtw_vendor_event_t event);
|
||||
#endif
|
||||
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) || defined(RTW_VENDOR_EXT_SUPPORT) */
|
||||
|
||||
#endif /* _RTW_CFGVENDOR_H_ */
|
||||
4191
os_dep/linux/rtw_proc.c
Normal file
File diff suppressed because it is too large
60
os_dep/linux/rtw_proc.h
Normal file
@@ -0,0 +1,60 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2007 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __RTW_PROC_H__
|
||||
#define __RTW_PROC_H__
|
||||
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#define RTW_PROC_HDL_TYPE_SEQ 0
|
||||
#define RTW_PROC_HDL_TYPE_SSEQ 1
|
||||
|
||||
struct rtw_proc_hdl {
|
||||
char *name;
|
||||
u8 type;
|
||||
union {
|
||||
int (*show)(struct seq_file *, void *);
|
||||
struct seq_operations *seq_op;
|
||||
} u;
|
||||
ssize_t (*write)(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
|
||||
};
|
||||
|
||||
#define RTW_PROC_HDL_SEQ(_name, _seq_op, _write) \
|
||||
{ .name = _name, .type = RTW_PROC_HDL_TYPE_SEQ, .u.seq_op = _seq_op, .write = _write}
|
||||
|
||||
#define RTW_PROC_HDL_SSEQ(_name, _show, _write) \
|
||||
{ .name = _name, .type = RTW_PROC_HDL_TYPE_SSEQ, .u.show = _show, .write = _write}
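/*
 * Usage sketch (illustrative only): a proc handler table is typically built
 * from these macros, one entry per /proc node.  The callbacks named below are
 * hypothetical.
 *
 *	static int proc_get_foo(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "foo\n");
 *		return 0;
 *	}
 *
 *	static const struct rtw_proc_hdl example_proc_hdls[] = {
 *		RTW_PROC_HDL_SSEQ("foo", proc_get_foo, NULL),
 *		RTW_PROC_HDL_SSEQ("bar", proc_get_bar, proc_set_bar),
 *	};
 */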
|
||||
|
||||
#ifdef CONFIG_PROC_DEBUG
|
||||
|
||||
struct proc_dir_entry *get_rtw_drv_proc(void);
|
||||
int rtw_drv_proc_init(void);
|
||||
void rtw_drv_proc_deinit(void);
|
||||
struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev);
|
||||
void rtw_adapter_proc_deinit(struct net_device *dev);
|
||||
void rtw_adapter_proc_replace(struct net_device *dev);
|
||||
|
||||
#else /* !CONFIG_PROC_DEBUG */
|
||||
|
||||
#define get_rtw_drv_proc() NULL
|
||||
#define rtw_drv_proc_init() 0
|
||||
#define rtw_drv_proc_deinit() do {} while (0)
|
||||
#define rtw_adapter_proc_init(dev) NULL
|
||||
#define rtw_adapter_proc_deinit(dev) do {} while (0)
|
||||
#define rtw_adapter_proc_replace(dev) do {} while (0)
|
||||
|
||||
#endif /* !CONFIG_PROC_DEBUG */
|
||||
|
||||
#endif /* __RTW_PROC_H__ */
|
||||
74
os_dep/linux/rtw_rhashtable.c
Normal file
@@ -0,0 +1,74 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2007 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_RTW_MESH /* for now, only promised for kernel versions on which we support mesh */
|
||||
|
||||
#include <drv_types.h>
|
||||
|
||||
int rtw_rhashtable_walk_enter(rtw_rhashtable *ht, rtw_rhashtable_iter *iter)
|
||||
{
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
|
||||
return rhashtable_walk_init((ht), (iter), GFP_ATOMIC);
|
||||
#else
|
||||
/* for kernel >= 4.4.0, rhashtable_walk_init uses GFP_KERNEL to alloc and takes spin_lock for assignment */
|
||||
iter->ht = ht;
|
||||
iter->p = NULL;
|
||||
iter->slot = 0;
|
||||
iter->skip = 0;
|
||||
|
||||
iter->walker = kmalloc(sizeof(*iter->walker), GFP_ATOMIC);
|
||||
if (!iter->walker)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&ht->lock);
|
||||
iter->walker->tbl =
|
||||
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
|
||||
list_add(&iter->walker->list, &iter->walker->tbl->walkers);
|
||||
spin_unlock(&ht->lock);
|
||||
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0))
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
|
||||
static inline int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
#ifdef CONFIG_MMU
|
||||
unsigned long addr = (unsigned long)x;
|
||||
|
||||
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) */
|
||||
|
||||
void kvfree(const void *addr)
|
||||
{
|
||||
if (is_vmalloc_addr(addr))
|
||||
vfree(addr);
|
||||
else
|
||||
kfree(addr);
|
||||
}
|
||||
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) */
|
||||
|
||||
#include "rhashtable.c"
|
||||
|
||||
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
|
||||
|
||||
#endif /* CONFIG_RTW_MESH */
|
||||
|
||||
55
os_dep/linux/rtw_rhashtable.h
Normal file
@@ -0,0 +1,55 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2007 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __RTW_RHASHTABLE_H__
|
||||
#define __RTW_RHASHTABLE_H__
|
||||
|
||||
#ifdef CONFIG_RTW_MESH /* for now, only promised for kernel versions on which we support mesh */
|
||||
|
||||
/* directly reference rhashtable in kernel */
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
|
||||
#include <linux/rhashtable.h>
|
||||
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
|
||||
|
||||
/* Use rhashtable from kernel 4.4 */
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
|
||||
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
|
||||
#endif
|
||||
#include "rhashtable.h"
|
||||
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
|
||||
|
||||
typedef struct rhashtable rtw_rhashtable;
|
||||
typedef struct rhash_head rtw_rhash_head;
|
||||
typedef struct rhashtable_params rtw_rhashtable_params;
|
||||
|
||||
#define rtw_rhashtable_init(ht, params) rhashtable_init(ht, params)
|
||||
|
||||
typedef struct rhashtable_iter rtw_rhashtable_iter;
|
||||
|
||||
int rtw_rhashtable_walk_enter(rtw_rhashtable *ht, rtw_rhashtable_iter *iter);
|
||||
#define rtw_rhashtable_walk_exit(iter) rhashtable_walk_exit(iter)
|
||||
#define rtw_rhashtable_walk_start(iter) rhashtable_walk_start(iter)
|
||||
#define rtw_rhashtable_walk_next(iter) rhashtable_walk_next(iter)
|
||||
#define rtw_rhashtable_walk_stop(iter) rhashtable_walk_stop(iter)
|
||||
|
||||
#define rtw_rhashtable_free_and_destroy(ht, free_fn, arg) rhashtable_free_and_destroy((ht), (free_fn), (arg))
|
||||
#define rtw_rhashtable_lookup_fast(ht, key, params) rhashtable_lookup_fast((ht), (key), (params))
|
||||
#define rtw_rhashtable_lookup_insert_fast(ht, obj, params) rhashtable_lookup_insert_fast((ht), (obj), (params))
|
||||
#define rtw_rhashtable_remove_fast(ht, obj, params) rhashtable_remove_fast((ht), (obj), (params))
|
||||
|
||||
#endif /* CONFIG_RTW_MESH */
|
||||
|
||||
#endif /* __RTW_RHASHTABLE_H__ */
|
||||
|
||||
1657
os_dep/linux/usb_intf.c
Normal file
File diff suppressed because it is too large
1138
os_dep/linux/usb_ops_linux.c
Normal file
File diff suppressed because it is too large
425
os_dep/linux/wifi_regd.c
Normal file
@@ -0,0 +1,425 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2009-2010 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <drv_types.h>
|
||||
|
||||
#ifdef CONFIG_IOCTL_CFG80211
|
||||
|
||||
#include <rtw_wifi_regd.h>
|
||||
|
||||
static struct country_code_to_enum_rd allCountries[] = {
|
||||
{COUNTRY_CODE_USER, "RD"},
|
||||
};
|
||||
|
||||
/*
|
||||
* REG_RULE(freq start, freq end, bandwidth, max gain, eirp, reg_flags)
|
||||
*/
|
||||
|
||||
/*
|
||||
*Only these channels allow active
|
||||
*scan on all world regulatory domains
|
||||
*/
|
||||
|
||||
/* 2G chan 01 - chan 11 */
|
||||
#define RTW_2GHZ_CH01_11 \
|
||||
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
|
||||
|
||||
/*
|
||||
*We enable active scan on these on a case
|
||||
*by case basis by regulatory domain
|
||||
*/
|
||||
|
||||
/* 2G chan 12 - chan 13, PASSIVE SCAN */
|
||||
#define RTW_2GHZ_CH12_13 \
|
||||
REG_RULE(2467-10, 2472+10, 40, 0, 20, \
|
||||
NL80211_RRF_PASSIVE_SCAN)
|
||||
|
||||
/* 2G chan 14, PASSIVE SCAN, NO OFDM (B only) */
|
||||
#define RTW_2GHZ_CH14 \
|
||||
REG_RULE(2484-10, 2484+10, 40, 0, 20, \
|
||||
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
|
||||
|
||||
/* 5G chan 36 - chan 64 */
|
||||
#define RTW_5GHZ_5150_5350 \
|
||||
REG_RULE(5150-10, 5350+10, 40, 0, 30, \
|
||||
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
|
||||
|
||||
/* 5G chan 100 - chan 165 */
|
||||
#define RTW_5GHZ_5470_5850 \
|
||||
REG_RULE(5470-10, 5850+10, 40, 0, 30, \
|
||||
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
|
||||
|
||||
/* 5G chan 149 - chan 165 */
|
||||
#define RTW_5GHZ_5725_5850 \
|
||||
REG_RULE(5725-10, 5850+10, 40, 0, 30, \
|
||||
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
|
||||
|
||||
/* 5G chan 36 - chan 165 */
|
||||
#define RTW_5GHZ_5150_5850 \
|
||||
REG_RULE(5150-10, 5850+10, 40, 0, 30, \
|
||||
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_rd = {
|
||||
.n_reg_rules = 3,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_2GHZ_CH12_13,
|
||||
RTW_5GHZ_5150_5850,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_11 = {
|
||||
.n_reg_rules = 1,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_12_13 = {
|
||||
.n_reg_rules = 2,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_2GHZ_CH12_13,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_no_midband = {
|
||||
.n_reg_rules = 3,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_5GHZ_5150_5350,
|
||||
RTW_5GHZ_5725_5850,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_60_64 = {
|
||||
.n_reg_rules = 3,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_2GHZ_CH12_13,
|
||||
RTW_5GHZ_5725_5850,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_14_60_64 = {
|
||||
.n_reg_rules = 4,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_2GHZ_CH12_13,
|
||||
RTW_2GHZ_CH14,
|
||||
RTW_5GHZ_5725_5850,
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_regdomain rtw_regdom_14 = {
|
||||
.n_reg_rules = 3,
|
||||
.alpha2 = "99",
|
||||
.reg_rules = {
|
||||
RTW_2GHZ_CH01_11,
|
||||
RTW_2GHZ_CH12_13,
|
||||
RTW_2GHZ_CH14,
|
||||
}
|
||||
};
|
||||
|
||||
#if 0
|
||||
static struct rtw_regulatory *rtw_regd;
|
||||
#endif
|
||||
|
||||
#if 0 /* not_yet */
|
||||
static void _rtw_reg_apply_beaconing_flags(struct wiphy *wiphy,
|
||||
enum nl80211_reg_initiator initiator)
|
||||
{
|
||||
enum nl80211_band band;
|
||||
struct ieee80211_supported_band *sband;
|
||||
const struct ieee80211_reg_rule *reg_rule;
|
||||
struct ieee80211_channel *ch;
|
||||
unsigned int i;
|
||||
u32 bandwidth = 0;
|
||||
int r;
|
||||
|
||||
for (band = 0; band < NUM_NL80211_BANDS; band++) {
|
||||
|
||||
if (!wiphy->bands[band])
|
||||
continue;
|
||||
|
||||
sband = wiphy->bands[band];
|
||||
|
||||
for (i = 0; i < sband->n_channels; i++) {
|
||||
ch = &sband->channels[i];
|
||||
if (rtw_is_dfs_ch(ch->hw_value) ||
|
||||
(ch->flags & IEEE80211_CHAN_RADAR))
|
||||
continue;
|
||||
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
|
||||
r = freq_reg_info(wiphy, ch->center_freq,
|
||||
bandwidth, ®_rule);
|
||||
if (r)
|
||||
continue;
|
||||
|
||||
/*
|
||||
*If 11d had a rule for this channel ensure
|
||||
*we enable adhoc/beaconing if it allows us to
|
||||
*use it. Note that we would have disabled it
|
||||
*by applying our static world regdomain by
|
||||
*default during init, prior to calling our
|
||||
*regulatory_hint().
|
||||
*/
|
||||
|
||||
if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
|
||||
ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
|
||||
if (!
|
||||
(reg_rule->flags &
|
||||
NL80211_RRF_PASSIVE_SCAN))
|
||||
ch->flags &=
|
||||
~IEEE80211_CHAN_PASSIVE_SCAN;
|
||||
} else {
|
||||
if (ch->beacon_found)
|
||||
ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
|
||||
IEEE80211_CHAN_PASSIVE_SCAN);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Allow active scan on Ch 12 and 13 */
|
||||
static void _rtw_reg_apply_active_scan_flags(struct wiphy *wiphy,
|
||||
enum nl80211_reg_initiator
|
||||
initiator)
|
||||
{
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_channel *ch;
|
||||
const struct ieee80211_reg_rule *reg_rule;
|
||||
u32 bandwidth = 0;
|
||||
int r;
|
||||
|
||||
if (!wiphy->bands[NL80211_BAND_2GHZ])
|
||||
return;
|
||||
sband = wiphy->bands[NL80211_BAND_2GHZ];
|
||||
|
||||
/*
|
||||
* If no country IE has been received always enable active scan
|
||||
* on these channels. This is only done for specific regulatory SKUs
|
||||
*/
|
||||
if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
|
||||
ch = &sband->channels[11]; /* CH 12 */
|
||||
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
|
||||
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
|
||||
ch = &sband->channels[12]; /* CH 13 */
|
||||
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
|
||||
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If a country IE has been received check its rule for this
|
||||
* channel first before enabling active scan. The passive scan
|
||||
* would have been enforced by the initial processing of our
|
||||
* custom regulatory domain.
|
||||
*/
|
||||
|
||||
ch = &sband->channels[11]; /* CH 12 */
|
||||
r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule);
|
||||
if (!r) {
|
||||
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
|
||||
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
|
||||
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
|
||||
}
|
||||
|
||||
ch = &sband->channels[12]; /* CH 13 */
|
||||
r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule);
|
||||
if (!r) {
|
||||
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
|
||||
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
|
||||
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void rtw_regd_apply_flags(struct wiphy *wiphy)
|
||||
{
|
||||
_adapter *padapter = wiphy_to_adapter(wiphy);
|
||||
struct rf_ctl_t *rfctl = adapter_to_rfctl(padapter);
|
||||
u8 channel_plan = rfctl->ChannelPlan;
|
||||
RT_CHANNEL_INFO *channel_set = rfctl->channel_set;
|
||||
u8 max_chan_nums = rfctl->max_chan_nums;
|
||||
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_channel *ch;
|
||||
unsigned int i, j;
|
||||
u16 channel;
|
||||
u32 freq;
|
||||
|
||||
/* disable all channels */
|
||||
for (i = 0; i < NUM_NL80211_BANDS; i++) {
|
||||
sband = wiphy->bands[i];
|
||||
|
||||
if (sband) {
|
||||
for (j = 0; j < sband->n_channels; j++) {
|
||||
ch = &sband->channels[j];
|
||||
|
||||
if (ch)
|
||||
ch->flags = IEEE80211_CHAN_DISABLED;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* channels apply by channel plans. */
|
||||
for (i = 0; i < max_chan_nums; i++) {
|
||||
channel = channel_set[i].ChannelNum;
|
||||
freq = rtw_ch2freq(channel);
|
||||
|
||||
ch = ieee80211_get_channel(wiphy, freq);
|
||||
if (!ch)
|
||||
continue;
|
||||
|
||||
if (channel_set[i].ScanType == SCAN_PASSIVE
|
||||
#if defined(CONFIG_DFS_MASTER)
|
||||
&& rtw_odm_dfs_domain_unknown(padapter)
|
||||
#endif
|
||||
) {
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
|
||||
ch->flags = (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN);
|
||||
#else
|
||||
ch->flags = IEEE80211_CHAN_NO_IR;
|
||||
#endif
|
||||
} else
|
||||
ch->flags = 0;
|
||||
|
||||
#ifdef CONFIG_DFS
|
||||
if (rtw_is_dfs_ch(ch->hw_value)
|
||||
#if defined(CONFIG_DFS_MASTER)
|
||||
&& rtw_odm_dfs_domain_unknown(padapter)
|
||||
#endif
|
||||
) {
|
||||
ch->flags |= IEEE80211_CHAN_RADAR;
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
|
||||
ch->flags |= (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN);
|
||||
#else
|
||||
ch->flags |= IEEE80211_CHAN_NO_IR;
|
||||
#endif
|
||||
}
|
||||
#endif /* CONFIG_DFS */
|
||||
}
|
||||
}
|
||||
|
||||
static const struct ieee80211_regdomain *_rtw_regdomain_select(struct
|
||||
rtw_regulatory
|
||||
*reg)
|
||||
{
|
||||
#if 0
|
||||
switch (reg->country_code) {
|
||||
case COUNTRY_CODE_USER:
|
||||
default:
|
||||
return &rtw_regdom_rd;
|
||||
}
|
||||
#else
|
||||
return &rtw_regdom_rd;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
|
||||
{
|
||||
switch (request->initiator) {
|
||||
case NL80211_REGDOM_SET_BY_DRIVER:
|
||||
RTW_INFO("%s: %s\n", __func__, "NL80211_REGDOM_SET_BY_DRIVER");
|
||||
break;
|
||||
case NL80211_REGDOM_SET_BY_CORE:
|
||||
RTW_INFO("%s: %s\n", __func__, "NL80211_REGDOM_SET_BY_CORE");
|
||||
break;
|
||||
case NL80211_REGDOM_SET_BY_USER:
|
||||
RTW_INFO("%s: %s alpha2:%c%c\n", __func__, "NL80211_REGDOM_SET_BY_USER"
|
||||
, request->alpha2[0], request->alpha2[1]);
|
||||
rtw_set_country(wiphy_to_adapter(wiphy), request->alpha2);
|
||||
break;
|
||||
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
|
||||
RTW_INFO("%s: %s\n", __func__, "NL80211_REGDOM_SET_BY_COUNTRY_IE");
|
||||
break;
|
||||
}
|
||||
|
||||
rtw_regd_apply_flags(wiphy);
|
||||
}
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
|
||||
static int rtw_reg_notifier_return(struct wiphy *wiphy, struct regulatory_request *request)
|
||||
{
|
||||
rtw_reg_notifier(wiphy, request);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy)
|
||||
{
|
||||
const struct ieee80211_regdomain *regd;
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
|
||||
wiphy->reg_notifier = rtw_reg_notifier_return;
|
||||
#else
|
||||
wiphy->reg_notifier = rtw_reg_notifier;
|
||||
#endif
|
||||
|
||||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
|
||||
wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
|
||||
wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
|
||||
wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
|
||||
#else
|
||||
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
|
||||
wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
|
||||
wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
|
||||
#endif
|
||||
|
||||
regd = _rtw_regdomain_select(reg);
|
||||
wiphy_apply_custom_regulatory(wiphy, regd);
|
||||
|
||||
rtw_regd_apply_flags(wiphy);
|
||||
}
|
||||
|
||||
static struct country_code_to_enum_rd *_rtw_regd_find_country(u16 countrycode)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
|
||||
if (allCountries[i].countrycode == countrycode)
|
||||
return &allCountries[i];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int rtw_regd_init(struct wiphy *wiphy)
|
||||
{
|
||||
#if 0
|
||||
if (rtw_regd == NULL) {
|
||||
rtw_regd = (struct rtw_regulatory *)
|
||||
rtw_malloc(sizeof(struct rtw_regulatory));
|
||||
|
||||
rtw_regd->alpha2[0] = '9';
|
||||
rtw_regd->alpha2[1] = '9';
|
||||
|
||||
rtw_regd->country_code = COUNTRY_CODE_USER;
|
||||
}
|
||||
|
||||
RTW_INFO("%s: Country alpha2 being used: %c%c\n",
|
||||
__func__, rtw_regd->alpha2[0], rtw_regd->alpha2[1]);
|
||||
#endif
|
||||
|
||||
_rtw_regd_init_wiphy(NULL, wiphy);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_IOCTL_CFG80211 */
|
||||
536
os_dep/linux/xmit_linux.c
Normal file
@@ -0,0 +1,536 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2007 - 2017 Realtek Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#define _XMIT_OSDEP_C_
|
||||
|
||||
#include <drv_types.h>
|
||||
|
||||
#define DBG_DUMP_OS_QUEUE_CTL 0
|
||||
|
||||
uint rtw_remainder_len(struct pkt_file *pfile)
|
||||
{
|
||||
return pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start));
|
||||
}
|
||||
|
||||
void _rtw_open_pktfile(_pkt *pktptr, struct pkt_file *pfile)
|
||||
{
|
||||
|
||||
pfile->pkt = pktptr;
|
||||
pfile->cur_addr = pfile->buf_start = pktptr->data;
|
||||
pfile->pkt_len = pfile->buf_len = pktptr->len;
|
||||
|
||||
pfile->cur_buffer = pfile->buf_start;
|
||||
|
||||
}
|
||||
|
||||
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
	uint len = 0;

	len = rtw_remainder_len(pfile);
	len = (rlen > len) ? len : rlen;

	if (rmem)
		skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);

	pfile->cur_addr += len;
	pfile->pkt_len -= len;

	return len;
}

sint rtw_endofpktfile(struct pkt_file *pfile)
{
	if (pfile->pkt_len == 0)
		return _TRUE;

	return _FALSE;
}

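/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * pkt_file helpers above wrap an skb so a caller can consume its payload in
 * chunks without touching skb internals directly. For some struct sk_buff
 * *skb, a hypothetical consumer might look like this:
 *
 *	struct pkt_file pktfile;
 *	u8 chunk[64];
 *
 *	_rtw_open_pktfile(skb, &pktfile);
 *	while (rtw_endofpktfile(&pktfile) == _FALSE)
 *		_rtw_pktfile_read(&pktfile, chunk, sizeof(chunk));
 */
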
void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib)
{
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	struct sk_buff *skb = (struct sk_buff *)pkt;

	pattrib->hw_tcp_csum = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_shinfo(skb)->nr_frags == 0) {
			const struct iphdr *ip = ip_hdr(skb);

			if (ip->protocol == IPPROTO_TCP) {
				/* TCP checksum offload by HW */
				RTW_INFO("CHECKSUM_PARTIAL TCP\n");
				pattrib->hw_tcp_csum = 1;
				/* skb_checksum_help(skb); */
			} else if (ip->protocol == IPPROTO_UDP) {
				/* RTW_INFO("CHECKSUM_PARTIAL UDP\n"); */
#if 1
				skb_checksum_help(skb);
#else
				/* Set UDP checksum = 0 to skip checksum check */
				struct udphdr *udp = (struct udphdr *)skb_transport_header(skb);

				udp->check = 0;
#endif
			} else {
				RTW_INFO("%s-%d TCP CSUM offload Error!!\n", __FUNCTION__, __LINE__);
				WARN_ON(1); /* we need a WARN() */
			}
		} else { /* IP fragmentation case */
			RTW_INFO("%s-%d nr_frags != 0, using skb_checksum_help(skb);!!\n", __FUNCTION__, __LINE__);
			skb_checksum_help(skb);
		}
	}
#endif
}

int rtw_os_xmit_resource_alloc(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag)
{
	if (alloc_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = pdvobjpriv->pusbdev;

		pxmitbuf->pallocated_buf = rtw_usb_buffer_alloc(pusbd, (size_t)alloc_sz, &pxmitbuf->dma_transfer_addr);
		pxmitbuf->pbuf = pxmitbuf->pallocated_buf;
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
		pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;

		pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}

	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;

		for (i = 0; i < 8; i++) {
			pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (pxmitbuf->pxmit_urb[i] == NULL) {
				RTW_INFO("pxmitbuf->pxmit_urb[i]==NULL");
				return _FAIL;
			}
		}
#endif
	}

	return _SUCCESS;
}

void rtw_os_xmit_resource_free(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag)
{
	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;

		for (i = 0; i < 8; i++) {
			if (pxmitbuf->pxmit_urb[i]) {
				/* usb_kill_urb(pxmitbuf->pxmit_urb[i]); */
				usb_free_urb(pxmitbuf->pxmit_urb[i]);
			}
		}
#endif
	}

	if (free_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = pdvobjpriv->pusbdev;

		rtw_usb_buffer_free(pusbd, (size_t)free_sz, pxmitbuf->pallocated_buf, pxmitbuf->dma_transfer_addr);
		pxmitbuf->pallocated_buf = NULL;
		pxmitbuf->dma_transfer_addr = 0;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
		if (pxmitbuf->pallocated_buf)
			rtw_mfree(pxmitbuf->pallocated_buf, free_sz);
#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}
}

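/*
 * Usage sketch (illustrative only): rtw_os_xmit_resource_alloc() and
 * rtw_os_xmit_resource_free() are expected to be called with matching size
 * and flag arguments when an xmit_buf is set up and torn down, e.g.:
 *
 *	if (rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
 *				       MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, _TRUE) == _FAIL)
 *		goto fail;
 *	...
 *	rtw_os_xmit_resource_free(padapter, pxmitbuf,
 *				  MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, _TRUE);
 *
 * MAX_XMITBUF_SZ here only stands in for whatever size the real callers in
 * the core xmit code pass; the actual sizes are decided there.
 */
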
void dump_os_queue(void *sel, _adapter *padapter)
{
	struct net_device *ndev = padapter->pnetdev;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	int i;

	for (i = 0; i < 4; i++) {
		RTW_PRINT_SEL(sel, "os_queue[%d]:%s\n"
			, i, __netif_subqueue_stopped(ndev, i) ? "stopped" : "waked");
	}
#else
	RTW_PRINT_SEL(sel, "os_queue:%s\n"
		, netif_queue_stopped(ndev) ? "stopped" : "waked");
#endif
}

#define WMM_XMIT_THRESHOLD	(NR_XMITFRAME * 2 / 5)

static inline bool rtw_os_need_wake_queue(_adapter *padapter, u16 qidx)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	if (padapter->registrypriv.wifi_spec) {
		if (pxmitpriv->hwxmits[qidx].accnt < WMM_XMIT_THRESHOLD)
			return _TRUE;
#ifdef DBG_CONFIG_ERROR_DETECT
#ifdef DBG_CONFIG_ERROR_RESET
	} else if (rtw_hal_sreset_inprogress(padapter) == _TRUE) {
		return _FALSE;
#endif /* #ifdef DBG_CONFIG_ERROR_RESET */
#endif /* #ifdef DBG_CONFIG_ERROR_DETECT */
	} else {
#ifdef CONFIG_MCC_MODE
		if (MCC_EN(padapter)) {
			if (rtw_hal_check_mcc_status(padapter, MCC_STATUS_DOING_MCC)
			    && MCC_STOP(padapter))
				return _FALSE;
		}
#endif /* CONFIG_MCC_MODE */
		return _TRUE;
	}
	return _FALSE;
#else
#ifdef CONFIG_MCC_MODE
	if (MCC_EN(padapter)) {
		if (rtw_hal_check_mcc_status(padapter, MCC_STATUS_DOING_MCC)
		    && MCC_STOP(padapter))
			return _FALSE;
	}
#endif /* CONFIG_MCC_MODE */
	return _TRUE;
#endif
}

static inline bool rtw_os_need_stop_queue(_adapter *padapter, u16 qidx)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	if (padapter->registrypriv.wifi_spec) {
		/* No free space for Tx, tx_worker is too slow */
		if (pxmitpriv->hwxmits[qidx].accnt > WMM_XMIT_THRESHOLD)
			return _TRUE;
	} else {
		if (pxmitpriv->free_xmitframe_cnt <= 4)
			return _TRUE;
	}
#else
	if (pxmitpriv->free_xmitframe_cnt <= 4)
		return _TRUE;
#endif
	return _FALSE;
}

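/*
 * Usage sketch (illustrative only): the two helpers above gate the OS tx
 * queues. In wifi_spec mode the per-AC accnt counter is compared against
 * WMM_XMIT_THRESHOLD for both the stop and wake decisions; otherwise the
 * queue is stopped once free_xmitframe_cnt drops to 4 or below and woken
 * again as pending packets complete. The callers below follow this pattern:
 *
 *	if (rtw_os_need_stop_queue(padapter, qidx))
 *		netif_stop_subqueue(padapter->pnetdev, qidx);
 *	...
 *	if (rtw_os_need_wake_queue(padapter, qidx))
 *		netif_wake_subqueue(padapter->pnetdev, qidx);
 */
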
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	u16 qidx;

	qidx = skb_get_queue_mapping(pkt);
	if (rtw_os_need_wake_queue(padapter, qidx)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), qidx);
		netif_wake_subqueue(padapter->pnetdev, qidx);
	}
#else
	if (rtw_os_need_wake_queue(padapter, 0)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(padapter));
		netif_wake_queue(padapter->pnetdev);
	}
#endif

	rtw_skb_free(pkt);
}

void rtw_os_xmit_complete(_adapter *padapter, struct xmit_frame *pxframe)
{
	if (pxframe->pkt)
		rtw_os_pkt_complete(padapter, pxframe->pkt);

	pxframe->pkt = NULL;
}

void rtw_os_xmit_schedule(_adapter *padapter)
{
#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
	_adapter *pri_adapter;

	if (!padapter)
		return;

	/* only dereference padapter via GET_PRIMARY_ADAPTER() after the NULL check */
	pri_adapter = GET_PRIMARY_ADAPTER(padapter);

	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&pri_adapter->xmitpriv.xmit_sema);
#else
	_irqL irqL;
	struct xmit_priv *pxmitpriv;

	if (!padapter)
		return;

	pxmitpriv = &padapter->xmitpriv;

	_enter_critical_bh(&pxmitpriv->lock, &irqL);

	if (rtw_txframes_pending(padapter))
		tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	_exit_critical_bh(&pxmitpriv->lock, &irqL);

#if defined(CONFIG_PCI_HCI) && defined(CONFIG_XMIT_THREAD_MODE)
	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&padapter->xmitpriv.xmit_sema);
#endif

#endif
}

static bool rtw_check_xmit_resource(_adapter *padapter, _pkt *pkt)
{
	bool busy = _FALSE;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	u16 qidx;

	qidx = skb_get_queue_mapping(pkt);
	if (rtw_os_need_stop_queue(padapter, qidx)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_stop_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), qidx);
		netif_stop_subqueue(padapter->pnetdev, qidx);
		busy = _TRUE;
	}
#else
	if (rtw_os_need_stop_queue(padapter, 0)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_stop_queue\n", FUNC_ADPT_ARG(padapter));
		rtw_netif_stop_queue(padapter->pnetdev);
		busy = _TRUE;
	}
#endif
	return busy;
}

void rtw_os_wake_queue_at_free_stainfo(_adapter *padapter, int *qcnt_freed)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	int i;

	for (i = 0; i < 4; i++) {
		if (qcnt_freed[i] == 0)
			continue;

		if (rtw_os_need_wake_queue(padapter, i)) {
			if (DBG_DUMP_OS_QUEUE_CTL)
				RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), i);
			netif_wake_subqueue(padapter->pnetdev, i);
		}
	}
#else
	if (qcnt_freed[0] || qcnt_freed[1] || qcnt_freed[2] || qcnt_freed[3]) {
		if (rtw_os_need_wake_queue(padapter, 0)) {
			if (DBG_DUMP_OS_QUEUE_CTL)
				RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(padapter));
			netif_wake_queue(padapter->pnetdev);
		}
	}
#endif
}

#ifdef CONFIG_TX_MCAST2UNI
int rtw_mlcst2unicst(_adapter *padapter, struct sk_buff *skb)
{
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_irqL irqL;
	_list *phead, *plist;
	struct sk_buff *newskb;
	struct sta_info *psta = NULL;
	u8 chk_alive_num = 0;
	char chk_alive_list[NUM_STA];
	u8 bc_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	u8 null_addr[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

	int i;
	s32 res;

	DBG_COUNTER(padapter->tx_logs.os_tx_m2u);

	_enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
	phead = &pstapriv->asoc_list;
	plist = get_next(phead);

	/* snapshot the sta asoc_list so the lock is not held while transmitting */
	while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) {
		int stainfo_offset;

		psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
		plist = get_next(plist);

		stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
		if (stainfo_offset_valid(stainfo_offset))
			chk_alive_list[chk_alive_num++] = stainfo_offset;
	}
	_exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);

	for (i = 0; i < chk_alive_num; i++) {
		psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
		if (!(psta->state & _FW_LINKED)) {
			DBG_COUNTER(padapter->tx_logs.os_tx_m2u_ignore_fw_linked);
			continue;
		}

		/* avoid sending the frame back to the STA it came from */
		if (_rtw_memcmp(psta->cmn.mac_addr, &skb->data[6], 6) == _TRUE
		    || _rtw_memcmp(psta->cmn.mac_addr, null_addr, 6) == _TRUE
		    || _rtw_memcmp(psta->cmn.mac_addr, bc_addr, 6) == _TRUE
		   ) {
			DBG_COUNTER(padapter->tx_logs.os_tx_m2u_ignore_self);
			continue;
		}

		DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry);

		newskb = rtw_skb_copy(skb);

		if (newskb) {
			_rtw_memcpy(newskb->data, psta->cmn.mac_addr, 6);
			res = rtw_xmit(padapter, &newskb);
			if (res < 0) {
				DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry_err_xmit);
				RTW_INFO("%s()-%d: rtw_xmit() return error! res=%d\n", __FUNCTION__, __LINE__, res);
				pxmitpriv->tx_drop++;
				rtw_skb_free(newskb);
			}
		} else {
			DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry_err_skb);
			RTW_INFO("%s-%d: rtw_skb_copy() failed!\n", __FUNCTION__, __LINE__);
			pxmitpriv->tx_drop++;
			/* rtw_skb_free(skb); */
			return _FALSE;	/* Caller shall tx this multicast frame via normal way. */
		}
	}

	rtw_skb_free(skb);
	return _TRUE;
}
#endif /* CONFIG_TX_MCAST2UNI */

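/*
 * Usage sketch (illustrative only): rtw_mlcst2unicst() consumes the skb and
 * returns _TRUE when per-station unicast copies were queued, or _FALSE when
 * the caller still owns the skb and must transmit the original multicast
 * frame the normal way, as _rtw_xmit_entry() below does:
 *
 *	if (IP_MCAST_MAC(skb->data) && rtw_mlcst2unicst(padapter, skb) == _TRUE)
 *		return 0;
 *	rtw_xmit(padapter, &skb);
 */
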
int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#ifdef CONFIG_TX_MCAST2UNI
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	extern int rtw_mc2u_disable;
#endif /* CONFIG_TX_MCAST2UNI */
	s32 res = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	u16 queue;
#endif

	if (padapter->registrypriv.mp_mode) {
		RTW_INFO("MP_TX_DROP_OS_FRAME\n");
		goto drop_packet;
	}
	DBG_COUNTER(padapter->tx_logs.os_tx);

	if (rtw_if_up(padapter) == _FALSE) {
		DBG_COUNTER(padapter->tx_logs.os_tx_err_up);
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s if_up fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

	rtw_check_xmit_resource(padapter, pkt);

#ifdef CONFIG_TX_MCAST2UNI
	if (!rtw_mc2u_disable
	    && MLME_IS_AP(padapter)
	    && (IP_MCAST_MAC(pkt->data)
		|| ICMPV6_MCAST_MAC(pkt->data)
#ifdef CONFIG_TX_BCAST2UNI
		|| is_broadcast_mac_addr(pkt->data)
#endif
	       )
	    && (padapter->registrypriv.wifi_spec == 0)
	   ) {
		if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
			res = rtw_mlcst2unicst(padapter, pkt);
			if (res == _TRUE)
				goto exit;
		} else {
			/* RTW_INFO("Stop M2U(%d, %d)! ", pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmitbuf_cnt); */
			/* RTW_INFO("!m2u ); */
			DBG_COUNTER(padapter->tx_logs.os_tx_m2u_stop);
		}
	}
#endif /* CONFIG_TX_MCAST2UNI */

	res = rtw_xmit(padapter, &pkt);
	if (res < 0) {
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

	goto exit;

drop_packet:
	pxmitpriv->tx_drop++;
	rtw_os_pkt_complete(padapter, pkt);

exit:
	return 0;
}

int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	int ret = 0;

	if (pkt) {
		if (check_fwstate(pmlmepriv, WIFI_MONITOR_STATE) == _TRUE) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
			rtw_monitor_xmit_entry((struct sk_buff *)pkt, pnetdev);
#endif
		} else {
			rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, pkt->truesize);
			ret = _rtw_xmit_entry(pkt, pnetdev);
		}
	}

	return ret;
}

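/*
 * Usage sketch (illustrative only): rtw_xmit_entry() serves as the
 * net_device transmit hook; a registration along these lines is assumed to
 * live elsewhere in the driver's os_dep code rather than in this file:
 *
 *	static const struct net_device_ops rtw_netdev_ops = {
 *		...
 *		.ndo_start_xmit = rtw_xmit_entry,
 *	};
 */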