* Kuhn's copyrights are licensed GPLv2-or-later. File as a whole remains GPLv2.
*/
+//config:config WGET
+//config: bool "wget (35 kb)"
+//config: default y
+//config: help
+//config: wget is a utility for non-interactive download of files from HTTP
+//config: and FTP servers.
+//config:
+//config:config FEATURE_WGET_LONG_OPTIONS
+//config: bool "Enable long options"
+//config: default y
+//config: depends on WGET && LONG_OPTS
+//config:
+//config:config FEATURE_WGET_STATUSBAR
+//config: bool "Enable progress bar (+2k)"
+//config: default y
+//config: depends on WGET
+//config:
+//config:config FEATURE_WGET_AUTHENTICATION
+//config: bool "Enable HTTP authentication"
+//config: default y
+//config: depends on WGET
+//config: help
+//config: Support authenticated HTTP transfers.
+//config:
+//config:config FEATURE_WGET_TIMEOUT
+//config: bool "Enable timeout option -T SEC"
+//config: default y
+//config: depends on WGET
+//config: help
+//config: Supports network read and connect timeouts for wget,
+//config: so that wget will give up and timeout, through the -T
+//config: command line option.
+//config:
+//config:	Currently only connect and network data read timeouts are
+//config: supported (i.e., timeout is not applied to the DNS query). When
+//config: FEATURE_WGET_LONG_OPTIONS is also enabled, the --timeout option
+//config: will work in addition to -T.
+//config:
+//config:config FEATURE_WGET_HTTPS
+//config: bool "Support HTTPS using internal TLS code"
+//config: default y
+//config: depends on WGET
+//config: select TLS
+//config: help
+//config: wget will use internal TLS code to connect to https:// URLs.
+//config: Note:
+//config:	On NOMMU machines, ssl_client applet should be available
+//config:	in the $PATH for this to work. Make sure to select that applet.
+//config:
+//config: Note: currently, TLS code only makes TLS I/O work, it
+//config: does *not* check that the peer is who it claims to be, etc.
+//config: IOW: it uses peer-supplied public keys to establish encryption
+//config: and signing keys, then encrypts and signs outgoing data and
+//config: decrypts incoming data.
+//config: It does not check signature hashes on the incoming data:
+//config: this means that attackers manipulating TCP packets can
+//config: send altered data and we unknowingly receive garbage.
+//config: (This check might be relatively easy to add).
+//config: It does not check public key's certificate:
+//config: this means that the peer may be an attacker impersonating
+//config: the server we think we are talking to.
+//config:
+//config: If you think this is unacceptable, consider this. As more and more
+//config: servers switch to HTTPS-only operation, without such "crippled"
+//config: TLS code it is *impossible* to simply download a kernel source
+//config: from kernel.org. Which can in real world translate into
+//config: "my small automatic tooling to build cross-compilers from sources
+//config: no longer works, I need to additionally keep a local copy
+//config: of ~4 megabyte source tarball of a SSL library and ~2 megabyte
+//config:	source of wget, need to compile and build both before I can
+//config: download anything. All this despite the fact that the build
+//config: is done in a QEMU sandbox on a machine with absolutely nothing
+//config: worth stealing, so I don't care if someone would go to a lot
+//config: of trouble to intercept my HTTPS download to send me an altered
+//config: kernel tarball".
+//config:
+//config: If you still think this is unacceptable, send patches.
+//config:
+//config:	If you still think this is unacceptable, do not want to send
+//config:	patches, but do want to waste bandwidth explaining how wrong
+//config:	it is, you will be ignored.
+//config:
+//config:config FEATURE_WGET_OPENSSL
+//config: bool "Try to connect to HTTPS using openssl"
+//config: default y
+//config: depends on WGET
+//config: help
+//config: Try to use openssl to handle HTTPS.
+//config:
+//config: OpenSSL has a simple SSL client for debug purposes.
+//config: If you select this option, wget will effectively run:
+//config: "openssl s_client -quiet -connect hostname:443
+//config: -servername hostname 2>/dev/null" and pipe its data
+//config: through it. -servername is not used if hostname is numeric.
+//config: Note inconvenient API: host resolution is done twice,
+//config: and there is no guarantee openssl's idea of IPv6 address
+//config: format is the same as ours.
+//config: Another problem is that s_client prints debug information
+//config: to stderr, and it needs to be suppressed. This means
+//config: all error messages get suppressed too.
+//config: openssl is also a big binary, often dynamically linked
+//config: against ~15 libraries.
+//config:
+//config: If openssl can't be executed, internal TLS code will be used
+//config: (if you enabled it); if openssl can be executed but fails later,
+//config: wget can't detect this, and download will fail.
+
+//applet:IF_WGET(APPLET(wget, BB_DIR_USR_BIN, BB_SUID_DROP))
+
+//kbuild:lib-$(CONFIG_WGET) += wget.o
+
//usage:#define wget_trivial_usage
//usage: IF_FEATURE_WGET_LONG_OPTIONS(
-//usage: "[-c|--continue] [-s|--spider] [-q|--quiet] [-O|--output-document FILE]\n"
+//usage: "[-c|--continue] [--spider] [-q|--quiet] [-O|--output-document FILE]\n"
//usage: " [--header 'header: value'] [-Y|--proxy on/off] [-P DIR]\n"
/* Since we ignore these opts, we don't show them in --help */
-/* //usage: " [--no-check-certificate] [--no-cache]" */
-//usage: " [-U|--user-agent AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
+/* //usage: " [--no-check-certificate] [--no-cache] [--passive-ftp] [-t TRIES]" */
+/* //usage: " [-nv] [-nc] [-nH] [-np]" */
+//usage: " [-S|--server-response] [-U|--user-agent AGENT]" IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
//usage: )
//usage: IF_NOT_FEATURE_WGET_LONG_OPTIONS(
-//usage: "[-csq] [-O FILE] [-Y on/off] [-P DIR] [-U AGENT]"
+//usage: "[-cq] [-O FILE] [-Y on/off] [-P DIR] [-S] [-U AGENT]"
//usage: IF_FEATURE_WGET_TIMEOUT(" [-T SEC]") " URL..."
//usage: )
//usage:#define wget_full_usage "\n\n"
//usage: "Retrieve files via HTTP or FTP\n"
-//usage: "\n -s Spider mode - only check file existence"
-//usage: "\n -c Continue retrieval of aborted transfer"
-//usage: "\n -q Quiet"
-//usage: "\n -P DIR Save to DIR (default .)"
+//usage: IF_FEATURE_WGET_LONG_OPTIONS(
+//usage: "\n --spider Only check URL existence: $? is 0 if exists"
+//usage: )
+//usage: "\n -c Continue retrieval of aborted transfer"
+//usage: "\n -q Quiet"
+//usage: "\n -P DIR Save to DIR (default .)"
+//usage: "\n -S Show server response"
//usage: IF_FEATURE_WGET_TIMEOUT(
-//usage: "\n -T SEC Network read timeout is SEC seconds"
+//usage: "\n -T SEC Network read timeout is SEC seconds"
//usage: )
-//usage: "\n -O FILE Save to FILE ('-' for stdout)"
-//usage: "\n -U STR Use STR for User-Agent header"
-//usage: "\n -Y Use proxy ('on' or 'off')"
+//usage: "\n -O FILE Save to FILE ('-' for stdout)"
+//usage: "\n -U STR Use STR for User-Agent header"
+//usage: "\n -Y on/off Use proxy"
#include "libbb.h"
#endif
+#define SSL_SUPPORTED (ENABLE_FEATURE_WGET_OPENSSL || ENABLE_FEATURE_WGET_HTTPS)
+
struct host_info {
char *allocated;
const char *path;
char *host;
int port;
};
-static const char P_FTP[] = "ftp";
-static const char P_HTTP[] = "http";
-static const char P_HTTPS[] = "https";
+static const char P_FTP[] ALIGN1 = "ftp";
+static const char P_HTTP[] ALIGN1 = "http";
+#if SSL_SUPPORTED
+static const char P_HTTPS[] ALIGN1 = "https";
+#endif
#if ENABLE_FEATURE_WGET_LONG_OPTIONS
/* User-specified headers prevent using our corresponding built-in headers. */
const char *user_agent; /* "User-Agent" header field */
#if ENABLE_FEATURE_WGET_TIMEOUT
unsigned timeout_seconds;
- bool connecting;
+ bool die_if_timed_out;
#endif
int output_fd;
int o_flags;
/* Must match option string! */
enum {
WGET_OPT_CONTINUE = (1 << 0),
- WGET_OPT_SPIDER = (1 << 1),
- WGET_OPT_QUIET = (1 << 2),
+ WGET_OPT_QUIET = (1 << 1),
+ WGET_OPT_SERVER_RESPONSE = (1 << 2),
WGET_OPT_OUTNAME = (1 << 3),
WGET_OPT_PREFIX = (1 << 4),
WGET_OPT_PROXY = (1 << 5),
WGET_OPT_USER_AGENT = (1 << 6),
WGET_OPT_NETWORK_READ_TIMEOUT = (1 << 7),
WGET_OPT_RETRIES = (1 << 8),
- WGET_OPT_PASSIVE = (1 << 9),
+ WGET_OPT_nsomething = (1 << 9),
WGET_OPT_HEADER = (1 << 10) * ENABLE_FEATURE_WGET_LONG_OPTIONS,
WGET_OPT_POST_DATA = (1 << 11) * ENABLE_FEATURE_WGET_LONG_OPTIONS,
+ WGET_OPT_SPIDER = (1 << 12) * ENABLE_FEATURE_WGET_LONG_OPTIONS,
};
enum {
static void alarm_handler(int sig UNUSED_PARAM)
{
/* This is theoretically unsafe (uses stdio and malloc in signal handler) */
- if (G.connecting)
+ if (G.die_if_timed_out)
bb_error_msg_and_die("download timed out");
}
+/* Arm the -T SEC watchdog. SIGALRM is handled by alarm_handler(), which
+ * only dies when die_if_timed_out is set; clear_alarm() resets that flag
+ * so a pending alarm delivered after a successful operation is harmless.
+ * No-op when no timeout was requested (G.timeout_seconds == 0). */
+static void set_alarm(void)
+{
+	if (G.timeout_seconds) {
+		alarm(G.timeout_seconds);
+		G.die_if_timed_out = 1;
+	}
+}
+# define clear_alarm() ((void)(G.die_if_timed_out = 0))
+#else
+# define set_alarm() ((void)0)
+# define clear_alarm() ((void)0)
+#endif
+
+#if ENABLE_FEATURE_WGET_OPENSSL
+/*
+ * is_ip_address() attempts to verify whether or not a string
+ * contains an IPv4 or IPv6 address (vs. an FQDN). The result
+ * of inet_pton() can be used to determine this.
+ *
+ * TODO add proper error checking when inet_pton() returns -1
+ * (some form of system error has occurred, and errno is set)
+ */
+static int is_ip_address(const char *string)
+{
+ struct sockaddr_in sa;
+
+ int result = inet_pton(AF_INET, string, &(sa.sin_addr));
+# if ENABLE_FEATURE_IPV6
+ if (result == 0) {
+ struct sockaddr_in6 sa6;
+ result = inet_pton(AF_INET6, string, &(sa6.sin6_addr));
+ }
+# endif
+ return (result == 1);
+}
#endif
static FILE *open_socket(len_and_sockaddr *lsa)
int fd;
FILE *fp;
- IF_FEATURE_WGET_TIMEOUT(alarm(G.timeout_seconds); G.connecting = 1;)
+ set_alarm();
fd = xconnect_stream(lsa);
- IF_FEATURE_WGET_TIMEOUT(G.connecting = 0;)
+ clear_alarm();
/* glibc 2.4 seems to try seeking on it - ??! */
/* hopefully it understands what ESPIPE means... */
}
/* Returns '\n' if it was seen, else '\0'. Trims at first '\r' or '\n' */
-/* FIXME: does not respect FEATURE_WGET_TIMEOUT and -T N: */
-static char fgets_and_trim(FILE *fp)
+static char fgets_and_trim(FILE *fp, const char *fmt)
{
char c;
char *buf_ptr;
+ set_alarm();
if (fgets(G.wget_buf, sizeof(G.wget_buf) - 1, fp) == NULL)
bb_perror_msg_and_die("error getting response");
+ clear_alarm();
buf_ptr = strchrnul(G.wget_buf, '\n');
c = *buf_ptr;
log_io("< %s", G.wget_buf);
+ if (fmt && (option_mask32 & WGET_OPT_SERVER_RESPONSE))
+ fprintf(stderr, fmt, G.wget_buf);
+
return c;
}
if (!s2)
s2 = "";
fprintf(fp, "%s%s\r\n", s1, s2);
+ /* With --server-response, wget also shows its ftp commands */
+ if (option_mask32 & WGET_OPT_SERVER_RESPONSE)
+ fprintf(stderr, "--> %s%s\n\n", s1, s2);
fflush(fp);
log_io("> %s%s", s1, s2);
}
do {
- fgets_and_trim(fp);
+ fgets_and_trim(fp, "%s\n");
} while (!isdigit(G.wget_buf[0]) || G.wget_buf[3] != ' ');
G.wget_buf[3] = '\0';
if (strcmp(url, P_FTP) == 0) {
h->port = bb_lookup_port(P_FTP, "tcp", 21);
} else
+#if SSL_SUPPORTED
if (strcmp(url, P_HTTPS) == 0) {
h->port = bb_lookup_port(P_HTTPS, "tcp", 443);
h->protocol = P_HTTPS;
} else
+#endif
if (strcmp(url, P_HTTP) == 0) {
http:
h->port = bb_lookup_port(P_HTTP, "tcp", 80);
// FYI:
// "Real" wget 'http://busybox.net?var=a/b' sends this request:
- // 'GET /?var=a/b HTTP 1.0'
+ // 'GET /?var=a/b HTTP/1.0'
// and saves 'index.html?var=a%2Fb' (we save 'b')
// wget 'http://busybox.net?login=john@doe':
// request: 'GET /?login=john@doe HTTP/1.0'
int c;
/* retrieve header line */
- c = fgets_and_trim(fp);
+ c = fgets_and_trim(fp, " %s\n");
/* end of the headers? */
if (G.wget_buf[0] == '\0')
return sfp;
}
-static int spawn_https_helper(const char *host, unsigned port)
+#if ENABLE_FEATURE_WGET_OPENSSL
+static int spawn_https_helper_openssl(const char *host, unsigned port)
{
char *allocated = NULL;
+ char *servername;
int sp[2];
int pid;
+ IF_FEATURE_WGET_HTTPS(volatile int child_failed = 0;)
if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp) != 0)
/* Kernel can have AF_UNIX support disabled */
if (!strchr(host, ':'))
host = allocated = xasprintf("%s:%u", host, port);
+ servername = xstrdup(host);
+ strrchr(servername, ':')[0] = '\0';
- pid = BB_MMU ? xfork() : xvfork();
+ fflush_all();
+ pid = xvfork();
if (pid == 0) {
/* Child */
- char *argv[6];
+ char *argv[8];
close(sp[0]);
xmove_fd(sp[1], 0);
xdup2(0, 1);
- /*
- * TODO: develop a tiny ssl/tls helper (using matrixssl?),
- * try to exec it here before falling back to big fat openssl.
- */
/*
* openssl s_client -quiet -connect www.kernel.org:443 2>/dev/null
* It prints some debug stuff on stderr, don't know how to suppress it.
*/
xmove_fd(2, 3);
xopen("/dev/null", O_RDWR);
+ memset(&argv, 0, sizeof(argv));
argv[0] = (char*)"openssl";
argv[1] = (char*)"s_client";
argv[2] = (char*)"-quiet";
argv[3] = (char*)"-connect";
argv[4] = (char*)host;
- argv[5] = NULL;
+ /*
+ * Per RFC 6066 Section 3, the only permitted values in the
+ * TLS server_name (SNI) field are FQDNs (DNS hostnames).
+ * IPv4 and IPv6 addresses, port numbers are not allowed.
+ */
+ if (!is_ip_address(servername)) {
+ argv[5] = (char*)"-servername";
+ argv[6] = (char*)servername;
+ }
+
BB_EXECVP(argv[0], argv);
xmove_fd(3, 2);
+# if ENABLE_FEATURE_WGET_HTTPS
+ child_failed = 1;
+ xfunc_die();
+# else
bb_perror_msg_and_die("can't execute '%s'", argv[0]);
+# endif
/* notreached */
}
/* Parent */
+ free(servername);
free(allocated);
close(sp[1]);
+# if ENABLE_FEATURE_WGET_HTTPS
+ if (child_failed) {
+ close(sp[0]);
+ return -1;
+ }
+# endif
return sp[0];
}
+#endif
-/* See networking/ssl_helper/README */
-#define SSL_HELPER 0
-
-#if SSL_HELPER
-static void spawn_https_helper1(int network_fd)
+#if ENABLE_FEATURE_WGET_HTTPS
+static void spawn_ssl_client(const char *host, int network_fd)
{
int sp[2];
int pid;
+ char *servername, *p;
+
+ servername = xstrdup(host);
+ p = strrchr(servername, ':');
+ if (p) *p = '\0';
if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp) != 0)
/* Kernel can have AF_UNIX support disabled */
bb_perror_msg_and_die("socketpair");
+ fflush_all();
pid = BB_MMU ? xfork() : xvfork();
if (pid == 0) {
/* Child */
- char *argv[3];
-
close(sp[0]);
xmove_fd(sp[1], 0);
xdup2(0, 1);
- xmove_fd(network_fd, 3);
- /*
- * A simple ssl/tls helper
- */
- argv[0] = (char*)"ssl_helper";
- argv[1] = (char*)"-d3";
- argv[2] = NULL;
- BB_EXECVP(argv[0], argv);
- bb_perror_msg_and_die("can't execute '%s'", argv[0]);
+ if (BB_MMU) {
+ tls_state_t *tls = new_tls_state();
+ tls->ifd = tls->ofd = network_fd;
+ tls_handshake(tls, servername);
+ tls_run_copy_loop(tls);
+ exit(0);
+ } else {
+ char *argv[5];
+ xmove_fd(network_fd, 3);
+ argv[0] = (char*)"ssl_client";
+ argv[1] = (char*)"-s3";
+ //TODO: if (!is_ip_address(servername))...
+ argv[2] = (char*)"-n";
+ argv[3] = servername;
+ argv[4] = NULL;
+ BB_EXECVP(argv[0], argv);
+ bb_perror_msg_and_die("can't execute '%s'", argv[0]);
+ }
/* notreached */
}
/* Parent */
+ free(servername);
close(sp[1]);
xmove_fd(sp[0], network_fd);
}
if (!G.chunked)
break;
- fgets_and_trim(dfp); /* Eat empty line */
+ fgets_and_trim(dfp, NULL); /* Eat empty line */
get_clen:
- fgets_and_trim(dfp);
+ fgets_and_trim(dfp, NULL);
G.content_len = STRTOOFF(G.wget_buf, NULL, 16);
/* FIXME: error check? */
if (G.content_len == 0)
int status;
/* Open socket to http(s) server */
+#if ENABLE_FEATURE_WGET_OPENSSL
+ /* openssl (and maybe internal TLS) support is configured */
if (target.protocol == P_HTTPS) {
-/* openssl-based helper
- * Inconvenient API since we can't give it an open fd
- */
- int fd = spawn_https_helper(server.host, server.port);
+ /* openssl-based helper
+ * Inconvenient API since we can't give it an open fd
+ */
+ int fd = spawn_https_helper_openssl(server.host, server.port);
+# if ENABLE_FEATURE_WGET_HTTPS
+ if (fd < 0) { /* no openssl? try internal */
+ sfp = open_socket(lsa);
+ spawn_ssl_client(server.host, fileno(sfp));
+ goto socket_opened;
+ }
+# else
+ /* We don't check for exec("openssl") failure in this case */
+# endif
sfp = fdopen(fd, "r+");
if (!sfp)
bb_perror_msg_and_die(bb_msg_memory_exhausted);
- } else
- sfp = open_socket(lsa);
-#if SSL_HELPER
+ goto socket_opened;
+ }
+ sfp = open_socket(lsa);
+ socket_opened:
+#elif ENABLE_FEATURE_WGET_HTTPS
+ /* Only internal TLS support is configured */
+ sfp = open_socket(lsa);
if (target.protocol == P_HTTPS)
- spawn_https_helper1(fileno(sfp));
+ spawn_ssl_client(server.host, fileno(sfp));
+#else
+ /* ssl (https) support is not configured */
+ sfp = open_socket(lsa);
#endif
/* Send HTTP request */
if (use_proxy) {
fflush(sfp);
+/* Tried doing this unconditionally.
+ * Cloudflare and nginx/1.11.5 are shocked to see SHUT_WR on non-HTTPS.
+ */
+#if SSL_SUPPORTED
+ if (target.protocol == P_HTTPS) {
+ /* If we use SSL helper, keeping our end of the socket open for writing
+ * makes our end (i.e. the same fd!) readable (EAGAIN instead of EOF)
+ * even after child closes its copy of the fd.
+ * This helps:
+ */
+ shutdown(fileno(sfp), SHUT_WR);
+ }
+#endif
+
/*
* Retrieve HTTP response line and check for "200" status code.
*/
read_response:
- fgets_and_trim(sfp);
+ fgets_and_trim(sfp, " %s\n");
str = G.wget_buf;
str = skip_non_whitespace(str);
while (gethdr(sfp) != NULL)
/* eat all remaining headers */;
goto read_response;
+
+ /* Success responses */
case 200:
+ /* fall through */
+ case 201: /* 201 Created */
+/* "The request has been fulfilled and resulted in a new resource being created" */
+ /* Standard wget is reported to treat this as success */
+ /* fall through */
+ case 202: /* 202 Accepted */
+/* "The request has been accepted for processing, but the processing has not been completed" */
+ /* Treat as success: fall through */
+ case 203: /* 203 Non-Authoritative Information */
+/* "Use of this response code is not required and is only appropriate when the response would otherwise be 200 (OK)" */
+ /* fall through */
+ case 204: /* 204 No Content */
/*
Response 204 doesn't say "null file", it says "metadata
has changed but data didn't":
However, in real world it was observed that some web servers
(e.g. Boa/0.94.14rc21) simply use code 204 when file size is zero.
*/
- case 204:
if (G.beg_range != 0) {
/* "Range:..." was not honored by the server.
* Restart download from the beginning.
reset_beg_range_to_zero();
}
break;
+ /* 205 Reset Content ?? what to do on this ?? */
+
case 300: /* redirection */
case 301:
case 302:
case 303:
break;
+
case 206: /* Partial Content */
if (G.beg_range != 0)
/* "Range:..." worked. Good. */
static const char wget_longopts[] ALIGN1 =
/* name, has_arg, val */
"continue\0" No_argument "c"
-//FIXME: -s isn't --spider, it's --save-headers!
- "spider\0" No_argument "s"
"quiet\0" No_argument "q"
+ "server-response\0" No_argument "S"
"output-document\0" Required_argument "O"
"directory-prefix\0" Required_argument "P"
"proxy\0" Required_argument "Y"
"user-agent\0" Required_argument "U"
-#if ENABLE_FEATURE_WGET_TIMEOUT
- "timeout\0" Required_argument "T"
-#endif
+IF_FEATURE_WGET_TIMEOUT(
+ "timeout\0" Required_argument "T")
/* Ignored: */
- // "tries\0" Required_argument "t"
+IF_DESKTOP( "tries\0" Required_argument "t")
+ "header\0" Required_argument "\xff"
+ "post-data\0" Required_argument "\xfe"
+ "spider\0" No_argument "\xfd"
/* Ignored (we always use PASV): */
- "passive-ftp\0" No_argument "\xff"
- "header\0" Required_argument "\xfe"
- "post-data\0" Required_argument "\xfd"
+IF_DESKTOP( "passive-ftp\0" No_argument "\xf0")
/* Ignored (we don't do ssl) */
- "no-check-certificate\0" No_argument "\xfc"
+IF_DESKTOP( "no-check-certificate\0" No_argument "\xf0")
/* Ignored (we don't support caching) */
- "no-cache\0" No_argument "\xfb"
+IF_DESKTOP( "no-cache\0" No_argument "\xf0")
+IF_DESKTOP( "no-verbose\0" No_argument "\xf0")
+IF_DESKTOP( "no-clobber\0" No_argument "\xf0")
+IF_DESKTOP( "no-host-directories\0" No_argument "\xf0")
+IF_DESKTOP( "no-parent\0" No_argument "\xf0")
;
#endif
#if ENABLE_FEATURE_WGET_LONG_OPTIONS
applet_long_options = wget_longopts;
#endif
- opt_complementary = "-1"
- IF_FEATURE_WGET_TIMEOUT(":T+")
- IF_FEATURE_WGET_LONG_OPTIONS(":\xfe::");
- getopt32(argv, "csqO:P:Y:U:T:" /*ignored:*/ "t:",
- &G.fname_out, &G.dir_prefix,
+ opt_complementary = "-1" /* at least one URL */
+ IF_FEATURE_WGET_LONG_OPTIONS(":\xff::"); /* --header is a list */
+ getopt32(argv, "cqSO:P:Y:U:T:+"
+ /*ignored:*/ "t:"
+ /*ignored:*/ "n::"
+ /* wget has exactly four -n<letter> opts, all of which we can ignore:
+ * -nv --no-verbose: be moderately quiet (-q is full quiet)
+ * -nc --no-clobber: abort if exists, neither download to FILE.n nor overwrite FILE
+ * -nH --no-host-directories: wget -r http://host/ won't create host/
+ * -np --no-parent
+ * "n::" above says that we accept -n[ARG].
+ * Specifying "n:" would be a bug: "-n ARG" would eat ARG!
+ */
+ , &G.fname_out, &G.dir_prefix,
&G.proxy_flag, &G.user_agent,
IF_FEATURE_WGET_TIMEOUT(&G.timeout_seconds) IF_NOT_FEATURE_WGET_TIMEOUT(NULL),
- NULL /* -t RETRIES */
+ NULL, /* -t RETRIES */
+ NULL /* -n[ARG] */
IF_FEATURE_WGET_LONG_OPTIONS(, &headers_llist)
IF_FEATURE_WGET_LONG_OPTIONS(, &G.post_data)
);
+#if 0 /* option bits debug */
+ if (option_mask32 & WGET_OPT_RETRIES) bb_error_msg("-t NUM");
+ if (option_mask32 & WGET_OPT_nsomething) bb_error_msg("-nsomething");
+ if (option_mask32 & WGET_OPT_HEADER) bb_error_msg("--header");
+ if (option_mask32 & WGET_OPT_POST_DATA) bb_error_msg("--post-data");
+ if (option_mask32 & WGET_OPT_SPIDER) bb_error_msg("--spider");
+ exit(0);
+#endif
argv += optind;
#if ENABLE_FEATURE_WGET_LONG_OPTIONS