httpd: Add option for specifying robots.txt

commit 0de982d75b
parent 837a0c05e5
Author: Eelco Dolstra
Date:   2014-09-18 19:04:59 +02:00
2 changed files with 16 additions and 5 deletions

@@ -209,10 +209,11 @@ let
         '';
       robotsTxt =
-        concatStringsSep "\n" (
+        concatStringsSep "\n" (filter (x: x != "") (
           # If this is a vhost, then include the entries for the main server as well.
-          (if isMainServer then [] else map (svc: svc.robotsEntries) mainSubservices)
-          ++ (map (svc: svc.robotsEntries) subservices));
+          (if isMainServer then [] else [mainCfg.robotsEntries] ++ map (svc: svc.robotsEntries) mainSubservices)
+          ++ [cfg.robotsEntries]
+          ++ (map (svc: svc.robotsEntries) subservices)));
     in ''
       ServerName ${serverInfo.canonicalName}
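
The new filter (x: x != "") keeps hosts and subservices with an empty robotsEntries from contributing blank lines to the generated robots.txt. A minimal nix-repl sketch of the combinator, assuming the nixpkgs lib functions are in scope (the Disallow strings are made up for illustration):

    nix-repl> concatStringsSep "\n" (filter (x: x != "")
                [ "Disallow: /foo/" "" "Disallow: /bar/" ])
    "Disallow: /foo/\nDisallow: /bar/"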

@@ -142,9 +142,19 @@ with lib;
     logFormat = mkOption {
       type = types.str;
       default = "common";
       example = "combined";
-      description = "
+      description = ''
         Log format for Apache's log files. Possible values are: combined, common, referer, agent.
-      ";
+      '';
+    };
+
+    robotsEntries = mkOption {
+      type = types.lines;
+      default = "";
+      example = "Disallow: /foo/";
+      description = ''
+        Specification of pages to be ignored by web crawlers. See <link
+        xlink:href='http://www.robotstxt.org/'/> for details.
+      '';
     };
 }
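
Since robotsEntries is a per-server option, it can be set for the main server (picked up via mainCfg above) as well as per virtual host. A hedged sketch of what using it in a NixOS configuration might look like; the hostname, adminAddr, and entries are illustrative, not from the commit:

    services.httpd = {
      enable = true;
      adminAddr = "admin@example.org";
      # Hypothetical entries; the module renders them into the served robots.txt.
      robotsEntries = ''
        User-agent: *
        Disallow: /private/
      '';
    };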