diff -uNr linux_org/Documentation/Configure.help linux/Documentation/Configure.help --- linux_org/Documentation/Configure.help 2006-10-27 14:08:20.000000000 +0200 +++ linux/Documentation/Configure.help 2006-10-27 14:11:52.000000000 +0200 @@ -2848,6 +2848,31 @@ If you want to compile it as a module, say M here and read . If unsure, say `Y'. +PPTP conntrack and NAT support +CONFIG_IP_NF_PPTP + This module adds support for PPTP (Point-to-Point Tunnelling Protocol, + RFC2637) connection tracking and NAT. + + If you are running PPTP sessions over a stateful firewall or NAT box, + you may want to enable this feature. + + Please note that not all PPTP modes of operation are supported yet. + For more info, read the top of the file net/ipv4/netfilter/ip_conntrack_pptp.c + + If you want to compile it as a module, say M here and read + Documentation/modules.txt. If unsure, say `N'. + +GRE protocol conntrack and NAT support +CONFIG_IP_NF_CT_PROTO_GRE + This module adds generic support for connection tracking and NAT of the + GRE protocol (RFC1701, RFC2784). Please note that this will only work + with GRE connections using the key field of the GRE header. + + You will need GRE support to enable PPTP support. + + If you want to compile it as a module, say `M' here and read + Documentation/modules.txt. If unsure, say `N'. + User space queueing via NETLINK CONFIG_IP_NF_QUEUE Netfilter has the ability to queue packets to user space: the diff -uNr linux_org/include/linux/netfilter_ipv4/ip_conntrack.h linux/include/linux/netfilter_ipv4/ip_conntrack.h --- linux_org/include/linux/netfilter_ipv4/ip_conntrack.h 2004-11-24 12:13:57.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_conntrack.h 2006-10-27 14:11:52.000000000 +0200 @@ -50,19 +50,23 @@ #include #include +#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h> /* per conntrack: protocol private data */ union ip_conntrack_proto { /* insert conntrack proto private data here */ + struct ip_ct_gre gre; struct ip_ct_tcp tcp; struct ip_ct_icmp icmp; }; union ip_conntrack_expect_proto { /* insert expect proto private data here */ + struct ip_ct_gre_expect gre; }; /* Add protocol helper include file here */ +#include <linux/netfilter_ipv4/ip_conntrack_pptp.h> #include #include @@ -71,6 +75,7 @@ /* per expectation: application helper private data */ union ip_conntrack_expect_help { /* insert conntrack helper private data (expect) here */ + struct ip_ct_pptp_expect exp_pptp_info; struct ip_ct_amanda_expect exp_amanda_info; struct ip_ct_ftp_expect exp_ftp_info; struct ip_ct_irc_expect exp_irc_info; @@ -85,16 +90,19 @@ /* per conntrack: application helper private data */ union ip_conntrack_help { /* insert conntrack helper private data (master) here */ + struct ip_ct_pptp_master ct_pptp_info; struct ip_ct_ftp_master ct_ftp_info; struct ip_ct_irc_master ct_irc_info; }; #ifdef CONFIG_IP_NF_NAT_NEEDED #include +#include <linux/netfilter_ipv4/ip_nat_pptp.h> /* per conntrack: nat application helper private data */ union ip_conntrack_nat_help { /* insert nat helper private data here */ + struct ip_nat_pptp nat_pptp_info; }; #endif diff -uNr linux_org/include/linux/netfilter_ipv4/ip_conntrack_pptp.h linux/include/linux/netfilter_ipv4/ip_conntrack_pptp.h --- linux_org/include/linux/netfilter_ipv4/ip_conntrack_pptp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_conntrack_pptp.h 2006-10-27 14:11:52.000000000 +0200 @@ -0,0 +1,313 @@ +/* PPTP constants and structs */ +#ifndef _CONNTRACK_PPTP_H +#define _CONNTRACK_PPTP_H + +/* state of the control session */ +enum pptp_ctrlsess_state { + PPTP_SESSION_NONE, /* no session present */ + PPTP_SESSION_ERROR, /* some
session error */ + PPTP_SESSION_STOPREQ, /* stop_sess request seen */ + PPTP_SESSION_REQUESTED, /* start_sess request seen */ + PPTP_SESSION_CONFIRMED, /* session established */ +}; + +/* state of the call inside the control session */ +enum pptp_ctrlcall_state { + PPTP_CALL_NONE, + PPTP_CALL_ERROR, + PPTP_CALL_OUT_REQ, + PPTP_CALL_OUT_CONF, + PPTP_CALL_IN_REQ, + PPTP_CALL_IN_REP, + PPTP_CALL_IN_CONF, + PPTP_CALL_CLEAR_REQ, +}; + + +/* conntrack private data */ +struct ip_ct_pptp_master { + enum pptp_ctrlsess_state sstate; /* session state */ + + /* everything below is going to be per-expectation in newnat, + * since there could be more than one call within one session */ + enum pptp_ctrlcall_state cstate; /* call state */ + u_int16_t pac_call_id; /* call id of PAC, host byte order */ + u_int16_t pns_call_id; /* call id of PNS, host byte order */ +}; + +/* conntrack_expect private member */ +struct ip_ct_pptp_expect { + enum pptp_ctrlcall_state cstate; /* call state */ + u_int16_t pac_call_id; /* call id of PAC */ + u_int16_t pns_call_id; /* call id of PNS */ +}; + + +#ifdef __KERNEL__ + +#include +DECLARE_LOCK_EXTERN(ip_pptp_lock); + +#define IP_CONNTR_PPTP PPTP_CONTROL_PORT + +union pptp_ctrl_union { + void *rawreq; + struct PptpStartSessionRequest *sreq; + struct PptpStartSessionReply *srep; + struct PptpStopSessionReqest *streq; + struct PptpStopSessionReply *strep; + struct PptpOutCallRequest *ocreq; + struct PptpOutCallReply *ocack; + struct PptpInCallRequest *icreq; + struct PptpInCallReply *icack; + struct PptpInCallConnected *iccon; + struct PptpClearCallRequest *clrreq; + struct PptpCallDisconnectNotify *disc; + struct PptpWanErrorNotify *wanerr; + struct PptpSetLinkInfo *setlink; +}; + + + +#define PPTP_CONTROL_PORT 1723 + +#define PPTP_PACKET_CONTROL 1 +#define PPTP_PACKET_MGMT 2 + +#define PPTP_MAGIC_COOKIE 0x1a2b3c4d + +struct pptp_pkt_hdr { + __u16 packetLength; + __u16 packetType; + __u32 magicCookie; +}; + +/* PptpControlMessageType values */ +#define PPTP_START_SESSION_REQUEST 1 +#define PPTP_START_SESSION_REPLY 2 +#define PPTP_STOP_SESSION_REQUEST 3 +#define PPTP_STOP_SESSION_REPLY 4 +#define PPTP_ECHO_REQUEST 5 +#define PPTP_ECHO_REPLY 6 +#define PPTP_OUT_CALL_REQUEST 7 +#define PPTP_OUT_CALL_REPLY 8 +#define PPTP_IN_CALL_REQUEST 9 +#define PPTP_IN_CALL_REPLY 10 +#define PPTP_IN_CALL_CONNECT 11 +#define PPTP_CALL_CLEAR_REQUEST 12 +#define PPTP_CALL_DISCONNECT_NOTIFY 13 +#define PPTP_WAN_ERROR_NOTIFY 14 +#define PPTP_SET_LINK_INFO 15 + +#define PPTP_MSG_MAX 15 + +/* PptpGeneralError values */ +#define PPTP_ERROR_CODE_NONE 0 +#define PPTP_NOT_CONNECTED 1 +#define PPTP_BAD_FORMAT 2 +#define PPTP_BAD_VALUE 3 +#define PPTP_NO_RESOURCE 4 +#define PPTP_BAD_CALLID 5 +#define PPTP_REMOVE_DEVICE_ERROR 6 + +struct PptpControlHeader { + __u16 messageType; + __u16 reserved; +}; + +/* FramingCapability Bitmap Values */ +#define PPTP_FRAME_CAP_ASYNC 0x1 +#define PPTP_FRAME_CAP_SYNC 0x2 + +/* BearerCapability Bitmap Values */ +#define PPTP_BEARER_CAP_ANALOG 0x1 +#define PPTP_BEARER_CAP_DIGITAL 0x2 + +struct PptpStartSessionRequest { + __u16 protocolVersion; + __u8 reserved1; + __u8 reserved2; + __u32 framingCapability; + __u32 bearerCapability; + __u16 maxChannels; + __u16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStartSessionResultCode Values */ +#define PPTP_START_OK 1 +#define PPTP_START_GENERAL_ERROR 2 +#define PPTP_START_ALREADY_CONNECTED 3 +#define PPTP_START_NOT_AUTHORIZED 4 +#define PPTP_START_UNKNOWN_PROTOCOL 5 + +struct 
PptpStartSessionReply { + __u16 protocolVersion; + __u8 resultCode; + __u8 generalErrorCode; + __u32 framingCapability; + __u32 bearerCapability; + __u16 maxChannels; + __u16 firmwareRevision; + __u8 hostName[64]; + __u8 vendorString[64]; +}; + +/* PptpStopReasons */ +#define PPTP_STOP_NONE 1 +#define PPTP_STOP_PROTOCOL 2 +#define PPTP_STOP_LOCAL_SHUTDOWN 3 + +struct PptpStopSessionRequest { + __u8 reason; +}; + +/* PptpStopSessionResultCode */ +#define PPTP_STOP_OK 1 +#define PPTP_STOP_GENERAL_ERROR 2 + +struct PptpStopSessionReply { + __u8 resultCode; + __u8 generalErrorCode; +}; + +struct PptpEchoRequest { + __u32 identNumber; +}; + +/* PptpEchoReplyResultCode */ +#define PPTP_ECHO_OK 1 +#define PPTP_ECHO_GENERAL_ERROR 2 + +struct PptpEchoReply { + __u32 identNumber; + __u8 resultCode; + __u8 generalErrorCode; + __u16 reserved; +}; + +/* PptpFramingType */ +#define PPTP_ASYNC_FRAMING 1 +#define PPTP_SYNC_FRAMING 2 +#define PPTP_DONT_CARE_FRAMING 3 + +/* PptpCallBearerType */ +#define PPTP_ANALOG_TYPE 1 +#define PPTP_DIGITAL_TYPE 2 +#define PPTP_DONT_CARE_BEARER_TYPE 3 + +struct PptpOutCallRequest { + __u16 callID; + __u16 callSerialNumber; + __u32 minBPS; + __u32 maxBPS; + __u32 bearerType; + __u32 framingType; + __u16 packetWindow; + __u16 packetProcDelay; + __u16 reserved1; + __u16 phoneNumberLength; + __u16 reserved2; + __u8 phoneNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpCallResultCode */ +#define PPTP_OUTCALL_CONNECT 1 +#define PPTP_OUTCALL_GENERAL_ERROR 2 +#define PPTP_OUTCALL_NO_CARRIER 3 +#define PPTP_OUTCALL_BUSY 4 +#define PPTP_OUTCALL_NO_DIAL_TONE 5 +#define PPTP_OUTCALL_TIMEOUT 6 +#define PPTP_OUTCALL_DONT_ACCEPT 7 + +struct PptpOutCallReply { + __u16 callID; + __u16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 causeCode; + __u32 connectSpeed; + __u16 packetWindow; + __u16 packetProcDelay; + __u32 physChannelID; +}; + +struct PptpInCallRequest { + __u16 callID; + __u16 callSerialNumber; + __u32 callBearerType; + __u32 physChannelID; + __u16 dialedNumberLength; + __u16 dialingNumberLength; + __u8 dialedNumber[64]; + __u8 dialingNumber[64]; + __u8 subAddress[64]; +}; + +/* PptpInCallResultCode */ +#define PPTP_INCALL_ACCEPT 1 +#define PPTP_INCALL_GENERAL_ERROR 2 +#define PPTP_INCALL_DONT_ACCEPT 3 + +struct PptpInCallReply { + __u16 callID; + __u16 peersCallID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 packetWindow; + __u16 packetProcDelay; + __u16 reserved; +}; + +struct PptpInCallConnected { + __u16 peersCallID; + __u16 reserved; + __u32 connectSpeed; + __u16 packetWindow; + __u16 packetProcDelay; + __u32 callFramingType; +}; + +struct PptpClearCallRequest { + __u16 callID; + __u16 reserved; +}; + +struct PptpCallDisconnectNotify { + __u16 callID; + __u8 resultCode; + __u8 generalErrorCode; + __u16 causeCode; + __u16 reserved; + __u8 callStatistics[128]; +}; + +struct PptpWanErrorNotify { + __u16 peersCallID; + __u16 reserved; + __u32 crcErrors; + __u32 framingErrors; + __u32 hardwareOverRuns; + __u32 bufferOverRuns; + __u32 timeoutErrors; + __u32 alignmentErrors; +}; + +struct PptpSetLinkInfo { + __u16 peersCallID; + __u16 reserved; + __u32 sendAccm; + __u32 recvAccm; +}; + + +struct pptp_priv_data { + __u16 call_id; + __u16 mcall_id; + __u16 pcall_id; +}; + +#endif /* __KERNEL__ */ +#endif /* _CONNTRACK_PPTP_H */ diff -uNr linux_org/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h linux/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h --- linux_org/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h 1970-01-01 
01:00:00.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h 2006-10-27 14:11:52.000000000 +0200 @@ -0,0 +1,123 @@ +#ifndef _CONNTRACK_PROTO_GRE_H +#define _CONNTRACK_PROTO_GRE_H +#include <asm/byteorder.h> + +/* GRE PROTOCOL HEADER */ + +/* GRE Version field */ +#define GRE_VERSION_1701 0x0 +#define GRE_VERSION_PPTP 0x1 + +/* GRE Protocol field */ +#define GRE_PROTOCOL_PPTP 0x880B + +/* GRE Flags */ +#define GRE_FLAG_C 0x80 +#define GRE_FLAG_R 0x40 +#define GRE_FLAG_K 0x20 +#define GRE_FLAG_S 0x10 +#define GRE_FLAG_A 0x80 + +#define GRE_IS_C(f) ((f)&GRE_FLAG_C) +#define GRE_IS_R(f) ((f)&GRE_FLAG_R) +#define GRE_IS_K(f) ((f)&GRE_FLAG_K) +#define GRE_IS_S(f) ((f)&GRE_FLAG_S) +#define GRE_IS_A(f) ((f)&GRE_FLAG_A) + +/* GRE is a mess: Four different standards */ +struct gre_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u16 rec:3, + srr:1, + seq:1, + key:1, + routing:1, + csum:1, + version:3, + reserved:4, + ack:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u16 csum:1, + routing:1, + key:1, + seq:1, + srr:1, + rec:3, + ack:1, + reserved:4, + version:3; +#else +#error "Adjust your defines" +#endif + __u16 protocol; }; + +/* modified GRE header for PPTP */ +struct gre_hdr_pptp { + __u8 flags; /* bitfield */ + __u8 version; /* should be GRE_VERSION_PPTP */ + __u16 protocol; /* should be GRE_PROTOCOL_PPTP */ + __u16 payload_len; /* size of ppp payload, not inc. gre header */ + __u16 call_id; /* peer's call_id for this session */ + __u32 seq; /* sequence number. Present if S==1 */ + __u32 ack; /* seq number of highest packet received by */ + /* sender in this session */ +}; + + +/* this is part of ip_conntrack */ +struct ip_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +/* this is part of ip_conntrack_expect */ +struct ip_ct_gre_expect { + struct ip_ct_gre_keymap *keymap_orig, *keymap_reply; +}; + +#ifdef __KERNEL__ +struct ip_conntrack_expect; + +/* structure for original <-> reply keymap */ +struct ip_ct_gre_keymap { + struct list_head list; + + struct ip_conntrack_tuple tuple; +}; + + +/* add new tuple->key_reply pair to keymap */ +int ip_ct_gre_keymap_add(struct ip_conntrack_expect *exp, + struct ip_conntrack_tuple *t, + int reply); + +/* change an existing keymap entry */ +void ip_ct_gre_keymap_change(struct ip_ct_gre_keymap *km, + struct ip_conntrack_tuple *t); + +/* delete keymap entries */ +void ip_ct_gre_keymap_destroy(struct ip_conntrack_expect *exp); + + +/* get pointer to gre key, if present */ +static inline u_int32_t *gre_key(struct gre_hdr *greh) +{ + if (!greh->key) + return NULL; + if (greh->csum || greh->routing) + return (u_int32_t *) (greh+sizeof(*greh)+4); + return (u_int32_t *) (greh+sizeof(*greh)); +} + +/* get pointer to gre csum, if present */ +static inline u_int16_t *gre_csum(struct gre_hdr *greh) +{ + if (!greh->csum) + return NULL; + return (u_int16_t *) (greh+sizeof(*greh)); +} + +#endif /* __KERNEL__ */ + +#endif /* _CONNTRACK_PROTO_GRE_H */ diff -uNr linux_org/include/linux/netfilter_ipv4/ip_conntrack_tuple.h linux/include/linux/netfilter_ipv4/ip_conntrack_tuple.h --- linux_org/include/linux/netfilter_ipv4/ip_conntrack_tuple.h 2003-11-17 02:07:46.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_conntrack_tuple.h 2006-10-27 14:11:52.000000000 +0200 @@ -14,7 +14,7 @@ union ip_conntrack_manip_proto { /* Add other protocols here. */ - u_int16_t all; + u_int32_t all; struct { u_int16_t port; @@ -25,6 +25,9 @@ struct { u_int16_t id; } icmp; + struct { + u_int32_t key; + } gre; }; /* The manipulable part of the tuple.
*/ @@ -44,7 +47,7 @@ u_int32_t ip; union { /* Add other protocols here. */ - u_int16_t all; + u_int64_t all; struct { u_int16_t port; @@ -55,6 +58,11 @@ struct { u_int8_t type, code; } icmp; + struct { + u_int16_t protocol; + u_int8_t version; + u_int32_t key; + } gre; } u; /* The protocol. */ @@ -80,10 +88,16 @@ #ifdef __KERNEL__ #define DUMP_TUPLE(tp) \ -DEBUGP("tuple %p: %u %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n", \ +DEBUGP("tuple %p: %u %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", \ (tp), (tp)->dst.protonum, \ - NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all), \ - NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all)) + NIPQUAD((tp)->src.ip), ntohl((tp)->src.u.all), \ + NIPQUAD((tp)->dst.ip), ntohl((tp)->dst.u.all)) + +#define DUMP_TUPLE_RAW(x) \ + DEBUGP("tuple %p: %u %u.%u.%u.%u:0x%08x -> %u.%u.%u.%u:0x%08x\n",\ + (x), (x)->dst.protonum, \ + NIPQUAD((x)->src.ip), ntohl((x)->src.u.all), \ + NIPQUAD((x)->dst.ip), ntohl((x)->dst.u.all)) #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) diff -uNr linux_org/include/linux/netfilter_ipv4/ip_conntrack_tuple.h.orig linux/include/linux/netfilter_ipv4/ip_conntrack_tuple.h.orig --- linux_org/include/linux/netfilter_ipv4/ip_conntrack_tuple.h.orig 1970-01-01 01:00:00.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_conntrack_tuple.h.orig 2003-11-17 02:07:46.000000000 +0100 @@ -0,0 +1,139 @@ +#ifndef _IP_CONNTRACK_TUPLE_H +#define _IP_CONNTRACK_TUPLE_H + +/* A `tuple' is a structure containing the information to uniquely + identify a connection. ie. if two packets have the same tuple, they + are in the same connection; if not, they are not. + + We divide the structure along "manipulatable" and + "non-manipulatable" lines, for the benefit of the NAT code. +*/ + +/* The protocol-specific manipulable parts of the tuple: always in + network order! */ +union ip_conntrack_manip_proto +{ + /* Add other protocols here. */ + u_int16_t all; + + struct { + u_int16_t port; + } tcp; + struct { + u_int16_t port; + } udp; + struct { + u_int16_t id; + } icmp; +}; + +/* The manipulable part of the tuple. */ +struct ip_conntrack_manip +{ + u_int32_t ip; + union ip_conntrack_manip_proto u; +}; + +/* This contains the information to distinguish a connection. */ +struct ip_conntrack_tuple +{ + struct ip_conntrack_manip src; + + /* These are the parts of the tuple which are fixed. */ + struct { + u_int32_t ip; + union { + /* Add other protocols here. */ + u_int16_t all; + + struct { + u_int16_t port; + } tcp; + struct { + u_int16_t port; + } udp; + struct { + u_int8_t type, code; + } icmp; + } u; + + /* The protocol. */ + u_int16_t protonum; + } dst; +}; + +/* This is optimized opposed to a memset of the whole structure. Everything we + * really care about is the source/destination unions */ +#define IP_CT_TUPLE_U_BLANK(tuple) \ + do { \ + (tuple)->src.u.all = 0; \ + (tuple)->dst.u.all = 0; \ + } while (0) + +enum ip_conntrack_dir +{ + IP_CT_DIR_ORIGINAL, + IP_CT_DIR_REPLY, + IP_CT_DIR_MAX +}; + +#ifdef __KERNEL__ + +#define DUMP_TUPLE(tp) \ +DEBUGP("tuple %p: %u %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n", \ + (tp), (tp)->dst.protonum, \ + NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all), \ + NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all)) + +#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) + +/* If we're the first tuple, it's the original dir. 
*/ +#define DIRECTION(h) ((enum ip_conntrack_dir)(&(h)->ctrack->tuplehash[1] == (h))) + +/* Connections have two entries in the hash table: one for each way */ +struct ip_conntrack_tuple_hash +{ + struct list_head list; + + struct ip_conntrack_tuple tuple; + + /* this == &ctrack->tuplehash[DIRECTION(this)]. */ + struct ip_conntrack *ctrack; +}; + +#endif /* __KERNEL__ */ + +static inline int ip_ct_tuple_src_equal(const struct ip_conntrack_tuple *t1, + const struct ip_conntrack_tuple *t2) +{ + return t1->src.ip == t2->src.ip + && t1->src.u.all == t2->src.u.all; +} + +static inline int ip_ct_tuple_dst_equal(const struct ip_conntrack_tuple *t1, + const struct ip_conntrack_tuple *t2) +{ + return t1->dst.ip == t2->dst.ip + && t1->dst.u.all == t2->dst.u.all + && t1->dst.protonum == t2->dst.protonum; +} + +static inline int ip_ct_tuple_equal(const struct ip_conntrack_tuple *t1, + const struct ip_conntrack_tuple *t2) +{ + return ip_ct_tuple_src_equal(t1, t2) && ip_ct_tuple_dst_equal(t1, t2); +} + +static inline int ip_ct_tuple_mask_cmp(const struct ip_conntrack_tuple *t, + const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack_tuple *mask) +{ + return !(((t->src.ip ^ tuple->src.ip) & mask->src.ip) + || ((t->dst.ip ^ tuple->dst.ip) & mask->dst.ip) + || ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all) + || ((t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all) + || ((t->dst.protonum ^ tuple->dst.protonum) + & mask->dst.protonum)); +} + +#endif /* _IP_CONNTRACK_TUPLE_H */ diff -uNr linux_org/include/linux/netfilter_ipv4/ip_nat_pptp.h linux/include/linux/netfilter_ipv4/ip_nat_pptp.h --- linux_org/include/linux/netfilter_ipv4/ip_nat_pptp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux/include/linux/netfilter_ipv4/ip_nat_pptp.h 2006-10-27 14:11:52.000000000 +0200 @@ -0,0 +1,11 @@ +/* PPTP constants and structs */ +#ifndef _NAT_PPTP_H +#define _NAT_PPTP_H + +/* conntrack private data */ +struct ip_nat_pptp { + u_int16_t pns_call_id; /* NAT'ed PNS call id */ + u_int16_t pac_call_id; /* NAT'ed PAC call id */ +}; + +#endif /* _NAT_PPTP_H */ diff -uNr linux_org/net/ipv4/netfilter/Config.in linux/net/ipv4/netfilter/Config.in --- linux_org/net/ipv4/netfilter/Config.in 2003-08-13 19:19:30.000000000 +0200 +++ linux/net/ipv4/netfilter/Config.in 2006-10-27 14:11:52.000000000 +0200 @@ -7,6 +7,11 @@ tristate 'Connection tracking (required for masq/NAT)' CONFIG_IP_NF_CONNTRACK if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then dep_tristate ' FTP protocol support' CONFIG_IP_NF_FTP $CONFIG_IP_NF_CONNTRACK + dep_tristate ' GRE protocol support' CONFIG_IP_NF_CT_PROTO_GRE $CONFIG_IP_NF_CONNTRACK + dep_tristate ' PPTP protocol support' CONFIG_IP_NF_PPTP $CONFIG_IP_NF_CONNTRACK + if [ "$CONFIG_IP_NF_PPTP" != "n" ]; then + bool ' PPTP verbose debug' CONFIG_IP_NF_PPTP_DEBUG + fi dep_tristate ' Amanda protocol support' CONFIG_IP_NF_AMANDA $CONFIG_IP_NF_CONNTRACK dep_tristate ' TFTP protocol support' CONFIG_IP_NF_TFTP $CONFIG_IP_NF_CONNTRACK dep_tristate ' IRC protocol support' CONFIG_IP_NF_IRC $CONFIG_IP_NF_CONNTRACK @@ -67,6 +72,20 @@ fi fi bool ' NAT of local connections (READ HELP)' CONFIG_IP_NF_NAT_LOCAL + if [ "$CONFIG_IP_NF_PPTP" = "m" ]; then + define_tristate CONFIG_IP_NF_NAT_PPTP m + else + if [ "$CONFIG_IP_NF_PPTP" = "y" ]; then + define_tristate CONFIG_IP_NF_NAT_PPTP $CONFIG_IP_NF_NAT + fi + fi + if [ "$CONFIG_IP_NF_CT_PROTO_GRE" = "m" ]; then + define_tristate CONFIG_IP_NF_NAT_PROTO_GRE m + else + if [ "$CONFIG_IP_NF_CT_PROTO_GRE" = "y" ]; then + define_tristate CONFIG_IP_NF_NAT_PROTO_GRE 
$CONFIG_IP_NF_NAT + fi + fi if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then dep_tristate ' Basic SNMP-ALG support (EXPERIMENTAL)' CONFIG_IP_NF_NAT_SNMP_BASIC $CONFIG_IP_NF_NAT fi diff -uNr linux_org/net/ipv4/netfilter/Makefile linux/net/ipv4/netfilter/Makefile --- linux_org/net/ipv4/netfilter/Makefile 2003-08-13 19:19:30.000000000 +0200 +++ linux/net/ipv4/netfilter/Makefile 2006-10-27 14:11:52.000000000 +0200 @@ -30,8 +30,21 @@ # connection tracking obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o + +# connection tracking protocol helpers +obj-$(CONFIG_IP_NF_CT_PROTO_GRE) += ip_conntrack_proto_gre.o +ifdef CONFIG_IP_NF_CT_PROTO_GRE + export-objs += ip_conntrack_proto_gre.o +endif + +# NAT protocol helpers +obj-$(CONFIG_IP_NF_NAT_PROTO_GRE) += ip_nat_proto_gre.o # connection tracking helpers +obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o +ifdef CONFIG_IP_NF_NAT_PPTP + export-objs += ip_conntrack_pptp.o +endif obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o ifdef CONFIG_IP_NF_AMANDA export-objs += ip_conntrack_amanda.o @@ -49,6 +62,7 @@ endif # NAT helpers +obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o diff -uNr linux_org/net/ipv4/netfilter/ip_conntrack_core.c linux/net/ipv4/netfilter/ip_conntrack_core.c --- linux_org/net/ipv4/netfilter/ip_conntrack_core.c 2004-11-24 12:14:04.000000000 +0100 +++ linux/net/ipv4/netfilter/ip_conntrack_core.c 2006-10-27 14:11:52.000000000 +0200 @@ -142,6 +142,8 @@ tuple->dst.ip = iph->daddr; tuple->dst.protonum = iph->protocol; + tuple->src.u.all = tuple->dst.u.all = 0; + ret = protocol->pkt_to_tuple((u_int32_t *)iph + iph->ihl, len - 4*iph->ihl, tuple); @@ -157,6 +159,8 @@ inverse->dst.ip = orig->src.ip; inverse->dst.protonum = orig->dst.protonum; + inverse->src.u.all = inverse->dst.u.all = 0; + return protocol->invert_tuple(inverse, orig); } @@ -945,8 +949,8 @@ * so there is no need to use the tuple lock too */ DEBUGP("ip_conntrack_expect_related %p\n", related_to); - DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple); - DEBUGP("mask: "); DUMP_TUPLE(&expect->mask); + DEBUGP("tuple: "); DUMP_TUPLE_RAW(&expect->tuple); + DEBUGP("mask: "); DUMP_TUPLE_RAW(&expect->mask); old = LIST_FIND(&ip_conntrack_expect_list, resent_expect, struct ip_conntrack_expect *, &expect->tuple, @@ -1063,15 +1067,14 @@ MUST_BE_READ_LOCKED(&ip_conntrack_lock); WRITE_LOCK(&ip_conntrack_expect_tuple_lock); - DEBUGP("change_expect:\n"); - DEBUGP("exp tuple: "); DUMP_TUPLE(&expect->tuple); - DEBUGP("exp mask: "); DUMP_TUPLE(&expect->mask); - DEBUGP("newtuple: "); DUMP_TUPLE(newtuple); + DEBUGP("exp tuple: "); DUMP_TUPLE_RAW(&expect->tuple); + DEBUGP("exp mask: "); DUMP_TUPLE_RAW(&expect->mask); + DEBUGP("newtuple: "); DUMP_TUPLE_RAW(newtuple); if (expect->ct_tuple.dst.protonum == 0) { /* Never seen before */ DEBUGP("change expect: never seen before\n"); - if (!ip_ct_tuple_equal(&expect->tuple, newtuple) + if (!ip_ct_tuple_mask_cmp(&expect->tuple, newtuple, &expect->mask) && LIST_FIND(&ip_conntrack_expect_list, expect_clash, struct ip_conntrack_expect *, newtuple, &expect->mask)) { /* Force NAT to find an unused tuple */ diff -uNr linux_org/net/ipv4/netfilter/ip_conntrack_core.c.orig linux/net/ipv4/netfilter/ip_conntrack_core.c.orig --- linux_org/net/ipv4/netfilter/ip_conntrack_core.c.orig 1970-01-01 01:00:00.000000000 +0100 +++ linux/net/ipv4/netfilter/ip_conntrack_core.c.orig 2004-11-24 12:14:04.000000000 +0100 @@ -0,0 +1,1446 @@ +/* Connection state 
tracking for netfilter. This is separated from, + but required by, the NAT layer; it can also be used by an iptables + extension. */ + +/* (c) 1999 Paul `Rusty' Russell. Licenced under the GNU General + * Public Licence. + * + * 23 Apr 2001: Harald Welte + * - new API and handling of conntrack/nat helpers + * - now capable of multiple expectations for one master + * 16 Jul 2002: Harald Welte + * - add usage/reference counts to ip_conntrack_expect + * - export ip_conntrack[_expect]_{find_get,put} functions + * */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* For ERR_PTR(). Yeah, I know... --RR */ +#include + +/* This rwlock protects the main hash table, protocol/helper/expected + registrations, conntrack timers*/ +#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock) +#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_conntrack_lock) + +#include +#include +#include +#include +#include + +#define IP_CONNTRACK_VERSION "2.1" + +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(format, args...) +#endif + +DECLARE_RWLOCK(ip_conntrack_lock); +DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock); + +void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL; +LIST_HEAD(ip_conntrack_expect_list); +LIST_HEAD(protocol_list); +static LIST_HEAD(helpers); +unsigned int ip_conntrack_htable_size = 0; +int ip_conntrack_max = 0; +static atomic_t ip_conntrack_count = ATOMIC_INIT(0); +struct list_head *ip_conntrack_hash; +static kmem_cache_t *ip_conntrack_cachep; + +extern struct ip_conntrack_protocol ip_conntrack_generic_protocol; + +static inline int proto_cmpfn(const struct ip_conntrack_protocol *curr, + u_int8_t protocol) +{ + return protocol == curr->proto; +} + +struct ip_conntrack_protocol *__ip_ct_find_proto(u_int8_t protocol) +{ + struct ip_conntrack_protocol *p; + + MUST_BE_READ_LOCKED(&ip_conntrack_lock); + p = LIST_FIND(&protocol_list, proto_cmpfn, + struct ip_conntrack_protocol *, protocol); + if (!p) + p = &ip_conntrack_generic_protocol; + + return p; +} + +struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol) +{ + struct ip_conntrack_protocol *p; + + READ_LOCK(&ip_conntrack_lock); + p = __ip_ct_find_proto(protocol); + READ_UNLOCK(&ip_conntrack_lock); + return p; +} + +inline void +ip_conntrack_put(struct ip_conntrack *ct) +{ + IP_NF_ASSERT(ct); + IP_NF_ASSERT(ct->infos[0].master); + /* nf_conntrack_put wants to go via an info struct, so feed it + one at random. 
*/ + nf_conntrack_put(&ct->infos[0]); +} + +static int ip_conntrack_hash_rnd_initted; +static unsigned int ip_conntrack_hash_rnd; + +static u_int32_t +hash_conntrack(const struct ip_conntrack_tuple *tuple) +{ +#if 0 + dump_tuple(tuple); +#endif + return (jhash_3words(tuple->src.ip, + (tuple->dst.ip ^ tuple->dst.protonum), + (tuple->src.u.all | (tuple->dst.u.all << 16)), + ip_conntrack_hash_rnd) % ip_conntrack_htable_size); +} + +inline int +get_tuple(const struct iphdr *iph, size_t len, + struct ip_conntrack_tuple *tuple, + struct ip_conntrack_protocol *protocol) +{ + int ret; + + /* Never happen */ + if (iph->frag_off & htons(IP_OFFSET)) { + printk("ip_conntrack_core: Frag of proto %u.\n", + iph->protocol); + return 0; + } + /* Guarantee 8 protocol bytes: if more wanted, use len param */ + else if (iph->ihl * 4 + 8 > len) + return 0; + + tuple->src.ip = iph->saddr; + tuple->dst.ip = iph->daddr; + tuple->dst.protonum = iph->protocol; + + ret = protocol->pkt_to_tuple((u_int32_t *)iph + iph->ihl, + len - 4*iph->ihl, + tuple); + return ret; +} + +static int +invert_tuple(struct ip_conntrack_tuple *inverse, + const struct ip_conntrack_tuple *orig, + const struct ip_conntrack_protocol *protocol) +{ + inverse->src.ip = orig->dst.ip; + inverse->dst.ip = orig->src.ip; + inverse->dst.protonum = orig->dst.protonum; + + return protocol->invert_tuple(inverse, orig); +} + + +/* ip_conntrack_expect helper functions */ + +/* Compare tuple parts depending on mask. */ +static inline int expect_cmp(const struct ip_conntrack_expect *i, + const struct ip_conntrack_tuple *tuple) +{ + MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock); + return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask); +} + +static void +destroy_expect(struct ip_conntrack_expect *exp) +{ + DEBUGP("destroy_expect(%p) use=%d\n", exp, atomic_read(&exp->use)); + IP_NF_ASSERT(atomic_read(&exp->use) == 0); + IP_NF_ASSERT(!timer_pending(&exp->timeout)); + + kfree(exp); +} + +inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp) +{ + IP_NF_ASSERT(exp); + + if (atomic_dec_and_test(&exp->use)) { + /* usage count dropped to zero */ + destroy_expect(exp); + } +} + +static inline struct ip_conntrack_expect * +__ip_ct_expect_find(const struct ip_conntrack_tuple *tuple) +{ + MUST_BE_READ_LOCKED(&ip_conntrack_lock); + MUST_BE_READ_LOCKED(&ip_conntrack_expect_tuple_lock); + return LIST_FIND(&ip_conntrack_expect_list, expect_cmp, + struct ip_conntrack_expect *, tuple); +} + +/* Find a expectation corresponding to a tuple. */ +struct ip_conntrack_expect * +ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) +{ + struct ip_conntrack_expect *exp; + + READ_LOCK(&ip_conntrack_lock); + READ_LOCK(&ip_conntrack_expect_tuple_lock); + exp = __ip_ct_expect_find(tuple); + if (exp) + atomic_inc(&exp->use); + READ_UNLOCK(&ip_conntrack_expect_tuple_lock); + READ_UNLOCK(&ip_conntrack_lock); + + return exp; +} + +/* remove one specific expectation from all lists and drop refcount, + * does _NOT_ delete the timer. */ +static void __unexpect_related(struct ip_conntrack_expect *expect) +{ + DEBUGP("unexpect_related(%p)\n", expect); + MUST_BE_WRITE_LOCKED(&ip_conntrack_lock); + + /* we're not allowed to unexpect a confirmed expectation! 
*/ + IP_NF_ASSERT(!expect->sibling); + + /* delete from global and local lists */ + list_del(&expect->list); + list_del(&expect->expected_list); + + /* decrement expect-count of master conntrack */ + if (expect->expectant) + expect->expectant->expecting--; + + ip_conntrack_expect_put(expect); +} + +/* remove one specific expecatation from all lists, drop refcount + * and expire timer. + * This function can _NOT_ be called for confirmed expects! */ +static void unexpect_related(struct ip_conntrack_expect *expect) +{ + IP_NF_ASSERT(expect->expectant); + IP_NF_ASSERT(expect->expectant->helper); + /* if we are supposed to have a timer, but we can't delete + * it: race condition. __unexpect_related will + * be calledd by timeout function */ + if (expect->expectant->helper->timeout + && !del_timer(&expect->timeout)) + return; + + __unexpect_related(expect); +} + +/* delete all unconfirmed expectations for this conntrack */ +static void remove_expectations(struct ip_conntrack *ct, int drop_refcount) +{ + struct list_head *exp_entry, *next; + struct ip_conntrack_expect *exp; + + DEBUGP("remove_expectations(%p)\n", ct); + + list_for_each_safe(exp_entry, next, &ct->sibling_list) { + exp = list_entry(exp_entry, struct ip_conntrack_expect, + expected_list); + + /* we skip established expectations, as we want to delete + * the un-established ones only */ + if (exp->sibling) { + DEBUGP("remove_expectations: skipping established %p of %p\n", exp->sibling, ct); + if (drop_refcount) { + /* Indicate that this expectations parent is dead */ + ip_conntrack_put(exp->expectant); + exp->expectant = NULL; + } + continue; + } + + IP_NF_ASSERT(list_inlist(&ip_conntrack_expect_list, exp)); + IP_NF_ASSERT(exp->expectant == ct); + + /* delete expectation from global and private lists */ + unexpect_related(exp); + } +} + +static void +clean_from_lists(struct ip_conntrack *ct) +{ + unsigned int ho, hr; + + DEBUGP("clean_from_lists(%p)\n", ct); + MUST_BE_WRITE_LOCKED(&ip_conntrack_lock); + + ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + LIST_DELETE(&ip_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]); + LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]); + + /* Destroy all un-established, pending expectations */ + remove_expectations(ct, 1); +} + +static void +destroy_conntrack(struct nf_conntrack *nfct) +{ + struct ip_conntrack *ct = (struct ip_conntrack *)nfct, *master = NULL; + struct ip_conntrack_protocol *proto; + + DEBUGP("destroy_conntrack(%p)\n", ct); + IP_NF_ASSERT(atomic_read(&nfct->use) == 0); + IP_NF_ASSERT(!timer_pending(&ct->timeout)); + + /* To make sure we don't get any weird locking issues here: + * destroy_conntrack() MUST NOT be called with a write lock + * to ip_conntrack_lock!!! 
-HW */ + proto = ip_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); + if (proto && proto->destroy) + proto->destroy(ct); + + if (ip_conntrack_destroyed) + ip_conntrack_destroyed(ct); + + WRITE_LOCK(&ip_conntrack_lock); + /* Make sure don't leave any orphaned expectations lying around */ + if (ct->expecting) + remove_expectations(ct, 1); + + /* Delete our master expectation */ + if (ct->master) { + if (ct->master->expectant) { + /* can't call __unexpect_related here, + * since it would screw up expect_list */ + list_del(&ct->master->expected_list); + master = ct->master->expectant; + } + kfree(ct->master); + } + WRITE_UNLOCK(&ip_conntrack_lock); + + if (master) + ip_conntrack_put(master); + + DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct); + kmem_cache_free(ip_conntrack_cachep, ct); + atomic_dec(&ip_conntrack_count); +} + +static void death_by_timeout(unsigned long ul_conntrack) +{ + struct ip_conntrack *ct = (void *)ul_conntrack; + + WRITE_LOCK(&ip_conntrack_lock); + clean_from_lists(ct); + WRITE_UNLOCK(&ip_conntrack_lock); + ip_conntrack_put(ct); +} + +static inline int +conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i, + const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack *ignored_conntrack) +{ + MUST_BE_READ_LOCKED(&ip_conntrack_lock); + return i->ctrack != ignored_conntrack + && ip_ct_tuple_equal(tuple, &i->tuple); +} + +static struct ip_conntrack_tuple_hash * +__ip_conntrack_find(const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack *ignored_conntrack) +{ + struct ip_conntrack_tuple_hash *h; + unsigned int hash = hash_conntrack(tuple); + + MUST_BE_READ_LOCKED(&ip_conntrack_lock); + h = LIST_FIND(&ip_conntrack_hash[hash], + conntrack_tuple_cmp, + struct ip_conntrack_tuple_hash *, + tuple, ignored_conntrack); + return h; +} + +/* Find a connection corresponding to a tuple. */ +struct ip_conntrack_tuple_hash * +ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack *ignored_conntrack) +{ + struct ip_conntrack_tuple_hash *h; + + READ_LOCK(&ip_conntrack_lock); + h = __ip_conntrack_find(tuple, ignored_conntrack); + if (h) + atomic_inc(&h->ctrack->ct_general.use); + READ_UNLOCK(&ip_conntrack_lock); + + return h; +} + +static inline struct ip_conntrack * +__ip_conntrack_get(struct nf_ct_info *nfct, enum ip_conntrack_info *ctinfo) +{ + struct ip_conntrack *ct + = (struct ip_conntrack *)nfct->master; + + /* ctinfo is the index of the nfct inside the conntrack */ + *ctinfo = nfct - ct->infos; + IP_NF_ASSERT(*ctinfo >= 0 && *ctinfo < IP_CT_NUMBER); + return ct; +} + +/* Return conntrack and conntrack_info given skb->nfct->master */ +struct ip_conntrack * +ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo) +{ + if (skb->nfct) + return __ip_conntrack_get(skb->nfct, ctinfo); + return NULL; +} + +/* Confirm a connection given skb->nfct; places it in hash table */ +int +__ip_conntrack_confirm(struct nf_ct_info *nfct) +{ + unsigned int hash, repl_hash; + struct ip_conntrack *ct; + enum ip_conntrack_info ctinfo; + + ct = __ip_conntrack_get(nfct, &ctinfo); + + /* ipt_REJECT uses ip_conntrack_attach to attach related + ICMP/TCP RST packets in other direction. Actual packet + which created connection will be IP_CT_NEW or for an + expected connection, IP_CT_RELATED. 
*/ + if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) + return NF_ACCEPT; + + hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + + /* We're not in hash table, and we refuse to set up related + connections for unconfirmed conns. But packet copies and + REJECT will give spurious warnings here. */ + /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ + + /* No external references means noone else could have + confirmed us. */ + IP_NF_ASSERT(!is_confirmed(ct)); + DEBUGP("Confirming conntrack %p\n", ct); + + WRITE_LOCK(&ip_conntrack_lock); + /* See if there's one in the list already, including reverse: + NAT could have grabbed it without realizing, since we're + not in the hash. If there is, we lost race. */ + if (!LIST_FIND(&ip_conntrack_hash[hash], + conntrack_tuple_cmp, + struct ip_conntrack_tuple_hash *, + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL) + && !LIST_FIND(&ip_conntrack_hash[repl_hash], + conntrack_tuple_cmp, + struct ip_conntrack_tuple_hash *, + &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) { + list_prepend(&ip_conntrack_hash[hash], + &ct->tuplehash[IP_CT_DIR_ORIGINAL]); + list_prepend(&ip_conntrack_hash[repl_hash], + &ct->tuplehash[IP_CT_DIR_REPLY]); + /* Timer relative to confirmation time, not original + setting time, otherwise we'd get timer wrap in + weird delay cases. */ + ct->timeout.expires += jiffies; + add_timer(&ct->timeout); + atomic_inc(&ct->ct_general.use); + set_bit(IPS_CONFIRMED_BIT, &ct->status); + WRITE_UNLOCK(&ip_conntrack_lock); + return NF_ACCEPT; + } + + WRITE_UNLOCK(&ip_conntrack_lock); + return NF_DROP; +} + +/* Returns true if a connection correspondings to the tuple (required + for NAT). */ +int +ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack *ignored_conntrack) +{ + struct ip_conntrack_tuple_hash *h; + + READ_LOCK(&ip_conntrack_lock); + h = __ip_conntrack_find(tuple, ignored_conntrack); + READ_UNLOCK(&ip_conntrack_lock); + + return h != NULL; +} + +/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ +struct ip_conntrack * +icmp_error_track(struct sk_buff *skb, + enum ip_conntrack_info *ctinfo, + unsigned int hooknum) +{ + const struct iphdr *iph = skb->nh.iph; + struct icmphdr *hdr; + struct ip_conntrack_tuple innertuple, origtuple; + struct iphdr *inner; + size_t datalen; + struct ip_conntrack_protocol *innerproto; + struct ip_conntrack_tuple_hash *h; + + IP_NF_ASSERT(iph->protocol == IPPROTO_ICMP); + IP_NF_ASSERT(skb->nfct == NULL); + + hdr = (struct icmphdr *)((u_int32_t *)iph + iph->ihl); + inner = (struct iphdr *)(hdr + 1); + datalen = skb->len - iph->ihl*4 - sizeof(*hdr); + + if (skb->len < iph->ihl * 4 + sizeof(*hdr) + sizeof(*iph)) { + DEBUGP("icmp_error_track: too short\n"); + return NULL; + } + + if (hdr->type != ICMP_DEST_UNREACH + && hdr->type != ICMP_SOURCE_QUENCH + && hdr->type != ICMP_TIME_EXCEEDED + && hdr->type != ICMP_PARAMETERPROB + && hdr->type != ICMP_REDIRECT) + return NULL; + + /* Ignore ICMP's containing fragments (shouldn't happen) */ + if (inner->frag_off & htons(IP_OFFSET)) { + DEBUGP("icmp_error_track: fragment of proto %u\n", + inner->protocol); + return NULL; + } + + /* Ignore it if the checksum's bogus. */ + if (ip_compute_csum((unsigned char *)hdr, sizeof(*hdr) + datalen)) { + DEBUGP("icmp_error_track: bad csum\n"); + return NULL; + } + + innerproto = ip_ct_find_proto(inner->protocol); + /* Are they talking about one of our connections? 
*/ + if (inner->ihl * 4 + 8 > datalen + || !get_tuple(inner, datalen, &origtuple, innerproto)) { + DEBUGP("icmp_error: ! get_tuple p=%u (%u*4+%u dlen=%u)\n", + inner->protocol, inner->ihl, 8, + datalen); + return NULL; + } + + /* Ordinarily, we'd expect the inverted tupleproto, but it's + been preserved inside the ICMP. */ + if (!invert_tuple(&innertuple, &origtuple, innerproto)) { + DEBUGP("icmp_error_track: Can't invert tuple\n"); + return NULL; + } + + *ctinfo = IP_CT_RELATED; + + h = ip_conntrack_find_get(&innertuple, NULL); + if (!h) { + /* Locally generated ICMPs will match inverted if they + haven't been SNAT'ed yet */ + /* FIXME: NAT code has to handle half-done double NAT --RR */ + if (hooknum == NF_IP_LOCAL_OUT) + h = ip_conntrack_find_get(&origtuple, NULL); + + if (!h) { + DEBUGP("icmp_error_track: no match\n"); + return NULL; + } + /* Reverse direction from that found */ + if (DIRECTION(h) != IP_CT_DIR_REPLY) + *ctinfo += IP_CT_IS_REPLY; + } else { + if (DIRECTION(h) == IP_CT_DIR_REPLY) + *ctinfo += IP_CT_IS_REPLY; + } + + /* Update skb to refer to this connection */ + skb->nfct = &h->ctrack->infos[*ctinfo]; + return h->ctrack; +} + +/* There's a small race here where we may free a just-assured + connection. Too bad: we're in trouble anyway. */ +static inline int unreplied(const struct ip_conntrack_tuple_hash *i) +{ + return !(test_bit(IPS_ASSURED_BIT, &i->ctrack->status)); +} + +static int early_drop(struct list_head *chain) +{ + /* Traverse backwards: gives us oldest, which is roughly LRU */ + struct ip_conntrack_tuple_hash *h; + int dropped = 0; + + READ_LOCK(&ip_conntrack_lock); + h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *); + if (h) + atomic_inc(&h->ctrack->ct_general.use); + READ_UNLOCK(&ip_conntrack_lock); + + if (!h) + return dropped; + + if (del_timer(&h->ctrack->timeout)) { + death_by_timeout((unsigned long)h->ctrack); + dropped = 1; + } + ip_conntrack_put(h->ctrack); + return dropped; +} + +static inline int helper_cmp(const struct ip_conntrack_helper *i, + const struct ip_conntrack_tuple *rtuple) +{ + return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask); +} + +struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple) +{ + return LIST_FIND(&helpers, helper_cmp, + struct ip_conntrack_helper *, + tuple); +} + +/* Allocate a new conntrack: we return -ENOMEM if classification + failed due to stress. Otherwise it really is unclassifiable. */ +static struct ip_conntrack_tuple_hash * +init_conntrack(const struct ip_conntrack_tuple *tuple, + struct ip_conntrack_protocol *protocol, + struct sk_buff *skb) +{ + struct ip_conntrack *conntrack; + struct ip_conntrack_tuple repl_tuple; + size_t hash; + struct ip_conntrack_expect *expected; + int i; + static unsigned int drop_next = 0; + + if (!ip_conntrack_hash_rnd_initted) { + get_random_bytes(&ip_conntrack_hash_rnd, 4); + ip_conntrack_hash_rnd_initted = 1; + } + + hash = hash_conntrack(tuple); + + if (ip_conntrack_max && + atomic_read(&ip_conntrack_count) >= ip_conntrack_max) { + /* Try dropping from random chain, or else from the + chain about to put into (in case they're trying to + bomb one hash chain). 
*/ + unsigned int next = (drop_next++)%ip_conntrack_htable_size; + + if (!early_drop(&ip_conntrack_hash[next]) + && !early_drop(&ip_conntrack_hash[hash])) { + if (net_ratelimit()) + printk(KERN_WARNING + "ip_conntrack: table full, dropping" + " packet.\n"); + return ERR_PTR(-ENOMEM); + } + } + + if (!invert_tuple(&repl_tuple, tuple, protocol)) { + DEBUGP("Can't invert tuple.\n"); + return NULL; + } + + conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC); + if (!conntrack) { + DEBUGP("Can't allocate conntrack.\n"); + return ERR_PTR(-ENOMEM); + } + + memset(conntrack, 0, sizeof(*conntrack)); + atomic_set(&conntrack->ct_general.use, 1); + conntrack->ct_general.destroy = destroy_conntrack; + conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple; + conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack; + conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple; + conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack; + for (i=0; i < IP_CT_NUMBER; i++) + conntrack->infos[i].master = &conntrack->ct_general; + + if (!protocol->new(conntrack, skb->nh.iph, skb->len)) { + kmem_cache_free(ip_conntrack_cachep, conntrack); + return NULL; + } + /* Don't set timer yet: wait for confirmation */ + init_timer(&conntrack->timeout); + conntrack->timeout.data = (unsigned long)conntrack; + conntrack->timeout.function = death_by_timeout; + + INIT_LIST_HEAD(&conntrack->sibling_list); + + WRITE_LOCK(&ip_conntrack_lock); + /* Need finding and deleting of expected ONLY if we win race */ + READ_LOCK(&ip_conntrack_expect_tuple_lock); + expected = LIST_FIND(&ip_conntrack_expect_list, expect_cmp, + struct ip_conntrack_expect *, tuple); + READ_UNLOCK(&ip_conntrack_expect_tuple_lock); + + /* If master is not in hash table yet (ie. packet hasn't left + this machine yet), how can other end know about expected? + Hence these are not the droids you are looking for (if + master ct never got confirmed, we'd hold a reference to it + and weird things would happen to future packets). */ + if (expected && !is_confirmed(expected->expectant)) + expected = NULL; + + /* Look up the conntrack helper for master connections only */ + if (!expected) + conntrack->helper = ip_ct_find_helper(&repl_tuple); + + /* If the expectation is dying, then this is a looser. */ + if (expected + && expected->expectant->helper->timeout + && ! del_timer(&expected->timeout)) + expected = NULL; + + if (expected) { + DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n", + conntrack, expected); + /* Welcome, Mr. Bond. We've been expecting you... 
*/ + __set_bit(IPS_EXPECTED_BIT, &conntrack->status); + conntrack->master = expected; + expected->sibling = conntrack; + LIST_DELETE(&ip_conntrack_expect_list, expected); + expected->expectant->expecting--; + nf_conntrack_get(&master_ct(conntrack)->infos[0]); + } + atomic_inc(&ip_conntrack_count); + WRITE_UNLOCK(&ip_conntrack_lock); + + if (expected && expected->expectfn) + expected->expectfn(conntrack); + return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; +} + +/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ +static inline struct ip_conntrack * +resolve_normal_ct(struct sk_buff *skb, + struct ip_conntrack_protocol *proto, + int *set_reply, + unsigned int hooknum, + enum ip_conntrack_info *ctinfo) +{ + struct ip_conntrack_tuple tuple; + struct ip_conntrack_tuple_hash *h; + + IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0); + + if (!get_tuple(skb->nh.iph, skb->len, &tuple, proto)) + return NULL; + + /* look for tuple match */ + h = ip_conntrack_find_get(&tuple, NULL); + if (!h) { + h = init_conntrack(&tuple, proto, skb); + if (!h) + return NULL; + if (IS_ERR(h)) + return (void *)h; + } + + /* It exists; we have (non-exclusive) reference. */ + if (DIRECTION(h) == IP_CT_DIR_REPLY) { + *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; + /* Please set reply bit if this packet OK */ + *set_reply = 1; + } else { + /* Once we've had two way comms, always ESTABLISHED. */ + if (test_bit(IPS_SEEN_REPLY_BIT, &h->ctrack->status)) { + DEBUGP("ip_conntrack_in: normal packet for %p\n", + h->ctrack); + *ctinfo = IP_CT_ESTABLISHED; + } else if (test_bit(IPS_EXPECTED_BIT, &h->ctrack->status)) { + DEBUGP("ip_conntrack_in: related packet for %p\n", + h->ctrack); + *ctinfo = IP_CT_RELATED; + } else { + DEBUGP("ip_conntrack_in: new packet for %p\n", + h->ctrack); + *ctinfo = IP_CT_NEW; + } + *set_reply = 0; + } + skb->nfct = &h->ctrack->infos[*ctinfo]; + return h->ctrack; +} + +/* Netfilter hook itself. */ +unsigned int ip_conntrack_in(unsigned int hooknum, + struct sk_buff **pskb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + struct ip_conntrack *ct; + enum ip_conntrack_info ctinfo; + struct ip_conntrack_protocol *proto; + int set_reply; + int ret; + + /* FIXME: Do this right please. --RR */ + (*pskb)->nfcache |= NFC_UNKNOWN; + +/* Doesn't cover locally-generated broadcast, so not worth it. */ +#if 0 + /* Ignore broadcast: no `connection'. */ + if ((*pskb)->pkt_type == PACKET_BROADCAST) { + printk("Broadcast packet!\n"); + return NF_ACCEPT; + } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) + == htonl(0x000000FF)) { + printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n", + NIPQUAD((*pskb)->nh.iph->saddr), + NIPQUAD((*pskb)->nh.iph->daddr), + (*pskb)->sk, (*pskb)->pkt_type); + } +#endif + + /* Previously seen (loopback)? Ignore. Do this before + fragment check. */ + if ((*pskb)->nfct) + return NF_ACCEPT; + + /* Gather fragments. */ + if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + *pskb = ip_ct_gather_frags(*pskb); + if (!*pskb) + return NF_STOLEN; + } + + proto = ip_ct_find_proto((*pskb)->nh.iph->protocol); + + /* It may be an icmp error... */ + if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP + && icmp_error_track(*pskb, &ctinfo, hooknum)) + return NF_ACCEPT; + + if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) + /* Not valid part of a connection */ + return NF_ACCEPT; + + if (IS_ERR(ct)) + /* Too stressed to deal. 
*/ + return NF_DROP; + + IP_NF_ASSERT((*pskb)->nfct); + + ret = proto->packet(ct, (*pskb)->nh.iph, (*pskb)->len, ctinfo); + if (ret == -1) { + /* Invalid */ + nf_conntrack_put((*pskb)->nfct); + (*pskb)->nfct = NULL; + return NF_ACCEPT; + } + + if (ret != NF_DROP && ct->helper) { + ret = ct->helper->help((*pskb)->nh.iph, (*pskb)->len, + ct, ctinfo); + if (ret == -1) { + /* Invalid */ + nf_conntrack_put((*pskb)->nfct); + (*pskb)->nfct = NULL; + return NF_ACCEPT; + } + } + if (set_reply) + set_bit(IPS_SEEN_REPLY_BIT, &ct->status); + + return ret; +} + +int invert_tuplepr(struct ip_conntrack_tuple *inverse, + const struct ip_conntrack_tuple *orig) +{ + return invert_tuple(inverse, orig, ip_ct_find_proto(orig->dst.protonum)); +} + +static inline int resent_expect(const struct ip_conntrack_expect *i, + const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack_tuple *mask) +{ + DEBUGP("resent_expect\n"); + DEBUGP(" tuple: "); DUMP_TUPLE(&i->tuple); + DEBUGP("ct_tuple: "); DUMP_TUPLE(&i->ct_tuple); + DEBUGP("test tuple: "); DUMP_TUPLE(tuple); + return (((i->ct_tuple.dst.protonum == 0 && ip_ct_tuple_equal(&i->tuple, tuple)) + || (i->ct_tuple.dst.protonum && ip_ct_tuple_equal(&i->ct_tuple, tuple))) + && ip_ct_tuple_equal(&i->mask, mask)); +} + +/* Would two expected things clash? */ +static inline int expect_clash(const struct ip_conntrack_expect *i, + const struct ip_conntrack_tuple *tuple, + const struct ip_conntrack_tuple *mask) +{ + /* Part covered by intersection of masks must be unequal, + otherwise they clash */ + struct ip_conntrack_tuple intersect_mask + = { { i->mask.src.ip & mask->src.ip, + { i->mask.src.u.all & mask->src.u.all } }, + { i->mask.dst.ip & mask->dst.ip, + { i->mask.dst.u.all & mask->dst.u.all }, + i->mask.dst.protonum & mask->dst.protonum } }; + + return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask); +} + +inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect) +{ + WRITE_LOCK(&ip_conntrack_lock); + unexpect_related(expect); + WRITE_UNLOCK(&ip_conntrack_lock); +} + +static void expectation_timed_out(unsigned long ul_expect) +{ + struct ip_conntrack_expect *expect = (void *) ul_expect; + + DEBUGP("expectation %p timed out\n", expect); + WRITE_LOCK(&ip_conntrack_lock); + __unexpect_related(expect); + WRITE_UNLOCK(&ip_conntrack_lock); +} + +/* Add a related connection. */ +int ip_conntrack_expect_related(struct ip_conntrack *related_to, + struct ip_conntrack_expect *expect) +{ + struct ip_conntrack_expect *old, *new; + int ret = 0; + + WRITE_LOCK(&ip_conntrack_lock); + /* Because of the write lock, no reader can walk the lists, + * so there is no need to use the tuple lock too */ + + DEBUGP("ip_conntrack_expect_related %p\n", related_to); + DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple); + DEBUGP("mask: "); DUMP_TUPLE(&expect->mask); + + old = LIST_FIND(&ip_conntrack_expect_list, resent_expect, + struct ip_conntrack_expect *, &expect->tuple, + &expect->mask); + if (old) { + /* Helper private data may contain offsets but no pointers + pointing into the payload - otherwise we should have to copy + the data filled out by the helper over the old one */ + DEBUGP("expect_related: resent packet\n"); + if (related_to->helper->timeout) { + if (!del_timer(&old->timeout)) { + /* expectation is dying. 
Fall through */ + old = NULL; + } else { + old->timeout.expires = jiffies + + related_to->helper->timeout * HZ; + add_timer(&old->timeout); + } + } + + if (old) { + WRITE_UNLOCK(&ip_conntrack_lock); + return -EEXIST; + } + } else if (related_to->helper->max_expected && + related_to->expecting >= related_to->helper->max_expected) { + /* old == NULL */ + if (!(related_to->helper->flags & + IP_CT_HELPER_F_REUSE_EXPECT)) { + WRITE_UNLOCK(&ip_conntrack_lock); + if (net_ratelimit()) + printk(KERN_WARNING + "ip_conntrack: max number of expected " + "connections %i of %s reached for " + "%u.%u.%u.%u->%u.%u.%u.%u\n", + related_to->helper->max_expected, + related_to->helper->name, + NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip), + NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip)); + return -EPERM; + } + DEBUGP("ip_conntrack: max number of expected " + "connections %i of %s reached for " + "%u.%u.%u.%u->%u.%u.%u.%u, reusing\n", + related_to->helper->max_expected, + related_to->helper->name, + NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip), + NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip)); + + /* choose the the oldest expectation to evict */ + list_for_each_entry(old, &related_to->sibling_list, + expected_list) + if (old->sibling == NULL) + break; + + /* We cannot fail since related_to->expecting is the number + * of unconfirmed expectations */ + IP_NF_ASSERT(old && old->sibling == NULL); + + /* newnat14 does not reuse the real allocated memory + * structures but rather unexpects the old and + * allocates a new. unexpect_related will decrement + * related_to->expecting. + */ + unexpect_related(old); + ret = -EPERM; + } else if (LIST_FIND(&ip_conntrack_expect_list, expect_clash, + struct ip_conntrack_expect *, &expect->tuple, + &expect->mask)) { + WRITE_UNLOCK(&ip_conntrack_lock); + DEBUGP("expect_related: busy!\n"); + return -EBUSY; + } + + new = (struct ip_conntrack_expect *) + kmalloc(sizeof(struct ip_conntrack_expect), GFP_ATOMIC); + if (!new) { + WRITE_UNLOCK(&ip_conntrack_lock); + DEBUGP("expect_relaed: OOM allocating expect\n"); + return -ENOMEM; + } + + DEBUGP("new expectation %p of conntrack %p\n", new, related_to); + memcpy(new, expect, sizeof(*expect)); + new->expectant = related_to; + new->sibling = NULL; + atomic_set(&new->use, 1); + + /* add to expected list for this connection */ + list_add_tail(&new->expected_list, &related_to->sibling_list); + /* add to global list of expectations */ + list_prepend(&ip_conntrack_expect_list, &new->list); + /* add and start timer if required */ + if (related_to->helper->timeout) { + init_timer(&new->timeout); + new->timeout.data = (unsigned long)new; + new->timeout.function = expectation_timed_out; + new->timeout.expires = jiffies + + related_to->helper->timeout * HZ; + add_timer(&new->timeout); + } + related_to->expecting++; + + WRITE_UNLOCK(&ip_conntrack_lock); + + return ret; +} + +/* Change tuple in an existing expectation */ +int ip_conntrack_change_expect(struct ip_conntrack_expect *expect, + struct ip_conntrack_tuple *newtuple) +{ + int ret; + + MUST_BE_READ_LOCKED(&ip_conntrack_lock); + WRITE_LOCK(&ip_conntrack_expect_tuple_lock); + + DEBUGP("change_expect:\n"); + DEBUGP("exp tuple: "); DUMP_TUPLE(&expect->tuple); + DEBUGP("exp mask: "); DUMP_TUPLE(&expect->mask); + DEBUGP("newtuple: "); DUMP_TUPLE(newtuple); + if (expect->ct_tuple.dst.protonum == 0) { + /* Never seen before */ + DEBUGP("change expect: never seen before\n"); + if (!ip_ct_tuple_equal(&expect->tuple, newtuple) + 
&& LIST_FIND(&ip_conntrack_expect_list, expect_clash, + struct ip_conntrack_expect *, newtuple, &expect->mask)) { + /* Force NAT to find an unused tuple */ + ret = -1; + } else { + memcpy(&expect->ct_tuple, &expect->tuple, sizeof(expect->tuple)); + memcpy(&expect->tuple, newtuple, sizeof(expect->tuple)); + ret = 0; + } + } else { + /* Resent packet */ + DEBUGP("change expect: resent packet\n"); + if (ip_ct_tuple_equal(&expect->tuple, newtuple)) { + ret = 0; + } else { + /* Force NAT to choose again the same port */ + ret = -1; + } + } + WRITE_UNLOCK(&ip_conntrack_expect_tuple_lock); + + return ret; +} + +/* Alter reply tuple (maybe alter helper). If it's already taken, + return 0 and don't do alteration. */ +int ip_conntrack_alter_reply(struct ip_conntrack *conntrack, + const struct ip_conntrack_tuple *newreply) +{ + WRITE_LOCK(&ip_conntrack_lock); + if (__ip_conntrack_find(newreply, conntrack)) { + WRITE_UNLOCK(&ip_conntrack_lock); + return 0; + } + /* Should be unconfirmed, so not in hash table yet */ + IP_NF_ASSERT(!is_confirmed(conntrack)); + + DEBUGP("Altering reply tuple of %p to ", conntrack); + DUMP_TUPLE(newreply); + + conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; + if (!conntrack->master && list_empty(&conntrack->sibling_list)) + conntrack->helper = ip_ct_find_helper(newreply); + WRITE_UNLOCK(&ip_conntrack_lock); + + return 1; +} + +int ip_conntrack_helper_register(struct ip_conntrack_helper *me) +{ + MOD_INC_USE_COUNT; + + WRITE_LOCK(&ip_conntrack_lock); + list_prepend(&helpers, me); + WRITE_UNLOCK(&ip_conntrack_lock); + + return 0; +} + +static inline int unhelp(struct ip_conntrack_tuple_hash *i, + const struct ip_conntrack_helper *me) +{ + if (i->ctrack->helper == me) { + /* Get rid of any expected. */ + remove_expectations(i->ctrack, 0); + /* And *then* set helper to NULL */ + i->ctrack->helper = NULL; + } + return 0; +} + +void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) +{ + unsigned int i; + + /* Need write lock here, to delete helper. */ + WRITE_LOCK(&ip_conntrack_lock); + LIST_DELETE(&helpers, me); + + /* Get rid of expecteds, set helpers to NULL. */ + for (i = 0; i < ip_conntrack_htable_size; i++) + LIST_FIND_W(&ip_conntrack_hash[i], unhelp, + struct ip_conntrack_tuple_hash *, me); + WRITE_UNLOCK(&ip_conntrack_lock); + + /* Someone could be still looking at the helper in a bh. */ + br_write_lock_bh(BR_NETPROTO_LOCK); + br_write_unlock_bh(BR_NETPROTO_LOCK); + + MOD_DEC_USE_COUNT; +} + +/* Refresh conntrack for this many jiffies. */ +void ip_ct_refresh(struct ip_conntrack *ct, unsigned long extra_jiffies) +{ + IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); + + WRITE_LOCK(&ip_conntrack_lock); + /* If not in hash table, timer will not be active yet */ + if (!is_confirmed(ct)) + ct->timeout.expires = extra_jiffies; + else { + /* Need del_timer for race avoidance (may already be dying). 
*/ + if (del_timer(&ct->timeout)) { + ct->timeout.expires = jiffies + extra_jiffies; + add_timer(&ct->timeout); + } + } + WRITE_UNLOCK(&ip_conntrack_lock); +} + +/* Returns new sk_buff, or NULL */ +struct sk_buff * +ip_ct_gather_frags(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; +#ifdef CONFIG_NETFILTER_DEBUG + unsigned int olddebug = skb->nf_debug; +#endif + if (sk) { + sock_hold(sk); + skb_orphan(skb); + } + + local_bh_disable(); + skb = ip_defrag(skb); + local_bh_enable(); + + if (!skb) { + if (sk) sock_put(sk); + return skb; + } else if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) { + kfree_skb(skb); + if (sk) sock_put(sk); + return NULL; + } + + if (sk) { + skb_set_owner_w(skb, sk); + sock_put(sk); + } + + ip_send_check(skb->nh.iph); + skb->nfcache |= NFC_ALTERED; +#ifdef CONFIG_NETFILTER_DEBUG + /* Packet path as if nothing had happened. */ + skb->nf_debug = olddebug; +#endif + return skb; +} + +/* Used by ipt_REJECT. */ +static void ip_conntrack_attach(struct sk_buff *nskb, struct nf_ct_info *nfct) +{ + struct ip_conntrack *ct; + enum ip_conntrack_info ctinfo; + + ct = __ip_conntrack_get(nfct, &ctinfo); + + /* This ICMP is in reverse direction to the packet which + caused it */ + if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) + ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; + else + ctinfo = IP_CT_RELATED; + + /* Attach new skbuff, and increment count */ + nskb->nfct = &ct->infos[ctinfo]; + atomic_inc(&ct->ct_general.use); +} + +static inline int +do_kill(const struct ip_conntrack_tuple_hash *i, + int (*kill)(const struct ip_conntrack *i, void *data), + void *data) +{ + return kill(i->ctrack, data); +} + +/* Bring out ya dead! */ +static struct ip_conntrack_tuple_hash * +get_next_corpse(int (*kill)(const struct ip_conntrack *i, void *data), + void *data, unsigned int *bucket) +{ + struct ip_conntrack_tuple_hash *h = NULL; + + READ_LOCK(&ip_conntrack_lock); + for (; !h && *bucket < ip_conntrack_htable_size; (*bucket)++) { + h = LIST_FIND(&ip_conntrack_hash[*bucket], do_kill, + struct ip_conntrack_tuple_hash *, kill, data); + } + if (h) + atomic_inc(&h->ctrack->ct_general.use); + READ_UNLOCK(&ip_conntrack_lock); + + return h; +} + +void +ip_ct_selective_cleanup(int (*kill)(const struct ip_conntrack *i, void *data), + void *data) +{ + struct ip_conntrack_tuple_hash *h; + unsigned int bucket = 0; + + while ((h = get_next_corpse(kill, data, &bucket)) != NULL) { + /* Time to push up daises... */ + if (del_timer(&h->ctrack->timeout)) + death_by_timeout((unsigned long)h->ctrack); + /* ... else the timer will get him soon. */ + + ip_conntrack_