
openvswitch Code Analysis – Fastpath Data Structures

  • December 7, 2019
  • Notes

Preface

This article is based on the openvswitch 2.5.0 source code. It focuses on the userspace openvswitch code, analyzing the reasoning behind its design and its most elegant parts.

Packet Structure

Packets are the most important objects in networking, so we begin by analyzing the packet structure struct dp_packet:

/* Buffer for holding packet data.  A dp_packet is automatically reallocated
 * as necessary if it grows too large for the available memory.
 */
struct dp_packet {
#ifdef DPDK_NETDEV
    struct rte_mbuf mbuf;       /* DPDK mbuf */
#else
    void *base_;                /* First byte of allocated space. */
    uint16_t allocated_;        /* Number of bytes allocated. */
    uint16_t data_ofs;          /* First byte actually in use. */
    uint32_t size_;             /* Number of bytes in use. */
    uint32_t rss_hash;          /* Packet hash. */
    bool rss_hash_valid;        /* Is the 'rss_hash' valid? */
#endif
    enum dp_packet_source source;  /* Source of memory allocated as 'base'. */
    uint8_t l2_pad_size;           /* Detected l2 padding size.
                                    * Padding is non-pullable. */
    uint16_t l2_5_ofs;             /* MPLS label stack offset, or UINT16_MAX */
    uint16_t l3_ofs;               /* Network-level header offset,
                                    * or UINT16_MAX. */
    uint16_t l4_ofs;               /* Transport-level header offset,
                                      or UINT16_MAX. */
    struct pkt_metadata md;
};

As you can see, when DPDK is used the struct rte_mbuf structure is embedded directly, supplemented by a few auxiliary fields.
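To make the offset fields concrete, here is a minimal sketch (assuming the non-DPDK layout shown above) of how they translate into header pointers. OVS ships similar inline accessors such as dp_packet_l3() in lib/dp-packet.h; the helper names below are illustrative, not the library's API.

#include <stdint.h>

/* Illustrative helpers (not OVS's actual API): resolve the first byte in
 * use and the transport-level header from the offsets in dp_packet. */
static inline void *
packet_data(const struct dp_packet *p)
{
    return (char *) p->base_ + p->data_ofs;   /* First byte actually in use. */
}

static inline void *
packet_l4(const struct dp_packet *p)
{
    /* UINT16_MAX marks "this packet has no such header". */
    return p->l4_ofs == UINT16_MAX
           ? NULL
           : (char *) packet_data(p) + p->l4_ofs;
}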

The struct pkt_metadata structure is as follows:

/* Datapath packet metadata */
struct pkt_metadata {
    uint32_t recirc_id;         /* Recirculation id carried with the
                                   recirculating packets. 0 for packets
                                   received from the wire. */
    uint32_t dp_hash;           /* hash value computed by the recirculation
                                   action. */
    uint32_t skb_priority;      /* Packet priority for QoS. */
    uint32_t pkt_mark;          /* Packet mark. */
    uint16_t ct_state;          /* Connection state. */
    uint16_t ct_zone;           /* Connection zone. */
    uint32_t ct_mark;           /* Connection mark. */
    ovs_u128 ct_label;          /* Connection label. */
    union flow_in_port in_port; /* Input port. */
    struct flow_tnl tunnel;     /* Encapsulating tunnel parameters. Note that
                                 * if 'ip_dst' == 0, the rest of the fields may
                                 * be uninitialized. */
};

This structure stores the packet's metadata and tunnel information; it is initialized before the packet is processed for the first time.
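A minimal sketch of that initialization, assuming the struct shown above (packet_metadata_reset() is an illustrative name; OVS's own pkt_metadata_init() is similar in spirit). Note the comment on tunnel: since the remaining tunnel fields may stay uninitialized while ip_dst == 0, the real code can avoid zeroing the whole tunnel struct.

#include <string.h>

/* Illustrative sketch: reset per-packet metadata before the first lookup.
 * recirc_id becomes 0, which by convention means "received from the wire". */
static inline void
packet_metadata_reset(struct pkt_metadata *md, odp_port_t in_port)
{
    memset(md, 0, sizeof *md);      /* Simplification: the real code need not
                                     * clear all of 'tunnel', only 'ip_dst'. */
    md->in_port.odp_port = in_port;
}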

Match Structures

The protagonist among the match structures is struct flow, which captures the complete characteristics of a packet. This structure links configuration to packet processing and is the core structure of openflow:

struct flow {
    /* Metadata */
    struct flow_tnl tunnel;     /* Encapsulating tunnel parameters. */
    ovs_be64 metadata;          /* OpenFlow Metadata. */
    uint32_t regs[FLOW_N_REGS]; /* Registers. */
    uint32_t skb_priority;      /* Packet priority for QoS. */
    uint32_t pkt_mark;          /* Packet mark. */
    uint32_t dp_hash;           /* Datapath computed hash value. The exact
                                 * computation is opaque to the user space. */
    union flow_in_port in_port; /* Input port.*/
    uint32_t recirc_id;         /* Must be exact match. */
    uint16_t ct_state;          /* Connection tracking state. */
    uint16_t ct_zone;           /* Connection tracking zone. */
    uint32_t ct_mark;           /* Connection mark.*/
    uint8_t pad1[4];            /* Pad to 64 bits. */
    ovs_u128 ct_label;          /* Connection label. */
    uint32_t conj_id;           /* Conjunction ID. */
    ofp_port_t actset_output;   /* Output port in action set. */
    uint8_t pad2[2];            /* Pad to 64 bits. */

    /* L2, Order the same as in the Ethernet header! (64-bit aligned) */
    struct eth_addr dl_dst;     /* Ethernet destination address. */
    struct eth_addr dl_src;     /* Ethernet source address. */
    ovs_be16 dl_type;           /* Ethernet frame type. */
    ovs_be16 vlan_tci;          /* If 802.1Q, TCI | VLAN_CFI; otherwise 0. */
    ovs_be32 mpls_lse[ROUND_UP(FLOW_MAX_MPLS_LABELS, 2)]; /* MPLS label stack
                                                             (with padding). */
    /* L3 (64-bit aligned) */
    ovs_be32 nw_src;            /* IPv4 source address. */
    ovs_be32 nw_dst;            /* IPv4 destination address. */
    struct in6_addr ipv6_src;   /* IPv6 source address. */
    struct in6_addr ipv6_dst;   /* IPv6 destination address. */
    ovs_be32 ipv6_label;        /* IPv6 flow label. */
    uint8_t nw_frag;            /* FLOW_FRAG_* flags. */
    uint8_t nw_tos;             /* IP ToS (including DSCP and ECN). */
    uint8_t nw_ttl;             /* IP TTL/Hop Limit. */
    uint8_t nw_proto;           /* IP protocol or low 8 bits of ARP opcode. */
    struct in6_addr nd_target;  /* IPv6 neighbor discovery (ND) target. */
    struct eth_addr arp_sha;    /* ARP/ND source hardware address. */
    struct eth_addr arp_tha;    /* ARP/ND target hardware address. */
    ovs_be16 tcp_flags;         /* TCP flags. With L3 to avoid matching L4. */
    ovs_be16 pad3;              /* Pad to 64 bits. */

    /* L4 (64-bit aligned) */
    ovs_be16 tp_src;            /* TCP/UDP/SCTP source port/ICMP type. */
    ovs_be16 tp_dst;            /* TCP/UDP/SCTP destination port/ICMP code. */
    ovs_be32 igmp_group_ip4;    /* IGMP group IPv4 address.
                                 * Keep last for BUILD_ASSERT_DECL below. */
};

In the packet processing path, the most heavily used structure is struct netdev_flow_key; every packet is parsed into one before matching:

/* Stores a miniflow with inline values */
struct netdev_flow_key {
    uint32_t hash;       /* Hash function differs for different users. */
    uint32_t len;        /* Length of the following miniflow (incl. map). */
    struct miniflow mf;
    uint64_t buf[FLOW_MAX_PACKET_U64S];
};

It contains a hash value, which may be the RSS hash computed by the NIC, or a hash computed in software when the packet is processed multiple times (e.g. recirculated). Depending on how it is computed, the hash can be used to look up either a flow or the flow cache. The len field equals the size of the miniflow plus the length of the actual data in buf, and is used for fast matching.
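A hedged sketch of how such a hash might be chosen, assuming the non-DPDK dp_packet layout shown earlier; software_hash_miniflow() is a hypothetical placeholder for the software fallback (OVS, for instance, hashes the miniflow's 5-tuple):

uint32_t software_hash_miniflow(const struct miniflow *, uint32_t len); /* hypothetical */

/* Sketch: prefer the NIC's RSS hash when valid, otherwise fall back to a
 * software hash over the parsed miniflow. */
static uint32_t
flow_key_choose_hash(const struct dp_packet *packet,
                     const struct netdev_flow_key *key)
{
    if (packet->rss_hash_valid) {
        return packet->rss_hash;            /* Computed by the NIC. */
    }
    return software_hash_miniflow(&key->mf, key->len);
}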

Next, let's look at the struct miniflow structure:

typedef unsigned long long map_t;
#define MAP_T_BITS (sizeof(map_t) * CHAR_BIT)
#define FLOW_U64S (sizeof(struct flow) / sizeof(uint64_t))

#define DIV_ROUND_UP(X, Y) (((X) + ((Y) - 1)) / (Y))

#define FLOWMAP_UNITS DIV_ROUND_UP(FLOW_U64S, MAP_T_BITS)

struct flowmap {
    map_t bits[FLOWMAP_UNITS];
};

struct miniflow {
    struct flowmap map;
    /* Followed by:
     *     uint64_t values[n];
     * where 'n' is miniflow_n_values(miniflow). */
};

The auxiliary macros involved are listed here as well.

As the name suggests, this is a compact representation of a flow: struct flow is 552 bytes, and directly matching 552 bytes of flow for every packet would hurt performance badly.

Structurally, map is a 16-byte bitmap: struct flow's 552 bytes make up FLOW_U64S = 552 / 8 = 69 8-byte units, and DIV_ROUND_UP(69, 64) yields FLOWMAP_UNITS = 2 64-bit words, i.e. 16 bytes. The structure is remarkably neat: each bit indicates whether the corresponding 8-byte unit of the flow holds data.

The buf field of struct netdev_flow_key (480 bytes, because some struct flow fields are not used as key values) holds the actual flow field data, densely packed.
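To make the dense layout concrete, here is a sketch of locating one 8-byte unit's value: values in buf are stored in the order of the set bits, so a value's index is the popcount of all lower set bits. OVS implements this with its miniflow accessors; the helpers below are simplified illustrations and use GCC's __builtin_popcountll.

#include <stdbool.h>

/* Illustrative sketch of miniflow addressing.  Presence of the u64 unit
 * at index 'idx' of struct flow is one bit in 'map'; its value, if
 * present, lives in 'buf' at the popcount of all lower set bits. */
static bool
unit_present(const struct miniflow *mf, unsigned int idx)
{
    return (mf->map.bits[idx / MAP_T_BITS] >> (idx % MAP_T_BITS)) & 1;
}

static uint64_t
unit_value(const struct netdev_flow_key *key, unsigned int idx)
{
    unsigned int unit = idx / MAP_T_BITS;
    map_t lower = key->mf.map.bits[unit]
                  & (((map_t) 1 << (idx % MAP_T_BITS)) - 1);
    unsigned int ofs = __builtin_popcountll(lower);

    for (unsigned int i = 0; i < unit; i++) {   /* Earlier bitmap words. */
        ofs += __builtin_popcountll(key->mf.map.bits[i]);
    }
    return key->buf[ofs];
}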

When matching against the cache, the hash locates a bucket; once the cache entry is confirmed valid, the comparison starts directly at the mf field of struct netdev_flow_key (using the entry's len field as the comparison length). If the packet types differ, the match already fails within the initial 16-byte bitmap, saving a great deal of matching time; moreover, len sharply reduces the number of bytes to compare, avoiding the awkwardness of comparing 552 bytes. struct miniflow is the essence of openvswitch fast matching.
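The comparison described above reduces to a sketch like the following (OVS's netdev_flow_key_equal() is essentially this):

#include <string.h>

/* Exact-match comparison: equal hash, then a single memcmp() starting at
 * the 16-byte bitmap and covering the packed values ('len' bytes). */
static inline bool
flow_key_equal(const struct netdev_flow_key *a,
               const struct netdev_flow_key *b)
{
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, b->len);
}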

Note, however, that this structure only supports exact comparison; it cannot be used directly for classifier (dpcls) matching.

If the cache misses, the flow itself must be looked up. By the same reasoning, directly comparing 552 bytes would be unreasonable, so a structure struct dpcls_rule is abstracted for flow lookup, as shown below:

/* Simple non-wildcarding single-priority classifier. */
struct dpcls {
    struct cmap subtables_map;
    struct pvector subtables;
};

/* A rule to be inserted to the classifier. */
struct dpcls_rule {
    struct cmap_node cmap_node;   /* Within struct dpcls_subtable 'rules'. */
    struct netdev_flow_key *mask; /* Subtable's mask. */
    struct netdev_flow_key flow;  /* Matching key. */
    /* 'flow' must be the last field, additional space is allocated here. */
};

dpcls partitions the rules by match mask: each distinct mask gets its own hash table (a subtable), and a lookup tries the subtables one after another; on a hit, an EMC entry is created and the subtable's position is adjusted. At this point the reader may wonder: why is there no priority check here?

The answer is that when a dpcls rule is generated from a full lookup of the flow_tables, its priority is already the highest among the matches; furthermore, whenever the rules change, the upper layer starts a revalidation task that checks the dpcls entries, guaranteeing their correctness.
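To tie the pieces together, here is a hedged sketch of the subtable walk described above, modeled on dpcls_lookup() in lib/dpif-netdev.c (the real code processes packets in batches; struct dpcls_subtable, not shown earlier, pairs a mask with its cmap of rules):

/* Simplified single-packet version of the dpcls lookup.  Assumes the
 * struct dpcls_subtable layout from lib/dpif-netdev.c: a cmap of rules
 * plus the subtable's netdev_flow_key mask. */
static struct dpcls_rule *
dpcls_lookup_one(struct dpcls *cls, const struct netdev_flow_key *key)
{
    struct dpcls_subtable *subtable;

    /* Subtables are visited in pvector order; frequently hit masks are
     * moved toward the front so they are tried first. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        uint32_t hash = netdev_flow_key_hash_in_mask(key, &subtable->mask);
        struct dpcls_rule *rule;

        CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
            if (dpcls_rule_matches_key(rule, key)) {
                return rule;    /* Caller then installs an EMC entry. */
            }
        }
    }
    return NULL;
}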