author     Paul Sokolovsky <paul.sokolovsky@linaro.org>  2014-10-20 23:10:45 +0300
committer  Paul Sokolovsky <paul.sokolovsky@linaro.org>  2014-10-20 23:12:07 +0300
commit     7225e131f2850959d9de80e6169c3af85b1d7d06 (patch)
tree       041a083d4bd5d2015f31cc90156ebfdfe055387f
parent     436e2a1dae4817c9267b1ba41cf2d0c839ba4c7b (diff)
ansible: Migrate to http://git.linaro.org/infrastructure/ansible-playbooks.git
Change-Id: I7c9452c5037a4baf67516d87f0168901f58e6979
-rw-r--r--  ansible/.gitignore | 2
-rw-r--r--  ansible/README | 90
-rw-r--r--  ansible/Vagrantfile | 104
-rw-r--r--  ansible/ansible.cfg | 2
-rw-r--r--  ansible/files/android-build.linaro.org.conf | 89
-rw-r--r--  ansible/files/gd_bundle.crt | 53
-rw-r--r--  ansible/files/jenkins.conf | 48
-rw-r--r--  ansible/files/jenkins_slaves.xml.tcwg-ci | 320
-rw-r--r--  ansible/files/jenkins_users.xml | 6
-rw-r--r--  ansible/filter_plugins/custom_plugins.py | 17
-rw-r--r--  ansible/frontend.yml | 12
-rw-r--r--  ansible/group_vars/all | 48
-rw-r--r--  ansible/group_vars/android-build | 7
-rw-r--r--  ansible/group_vars/ci | 5
-rw-r--r--  ansible/group_vars/ec2 | 5
-rw-r--r--  ansible/group_vars/production | 5
-rw-r--r--  ansible/group_vars/rdk-ci | 5
-rw-r--r--  ansible/group_vars/tcwg-ci | 7
-rw-r--r--  ansible/group_vars/vagrant | 3
-rw-r--r--  ansible/host_vars/ec2-devel-host | 1
-rw-r--r--  ansible/host_vars/tcwg.ci.linaro.org | 5
-rw-r--r--  ansible/hosts-devel-android-build | 19
-rw-r--r--  ansible/hosts-devel-ci | 19
-rw-r--r--  ansible/hosts-devel-rdk-ci | 19
-rw-r--r--  ansible/hosts-devel-tcwg-ci | 19
-rw-r--r--  ansible/hosts-prod | 26
-rw-r--r--  ansible/jenkins-ec2-dirs.yml | 21
-rw-r--r--  ansible/jenkins.yml | 41
-rwxr-xr-x  ansible/mangle-jobs-remote | 17
-rw-r--r--  ansible/new_publish.yml | 46
-rw-r--r--  ansible/reposeed.yml | 5
-rw-r--r--  ansible/roles/apache-site/tasks/main.yml | 19
-rw-r--r--  ansible/roles/apache/tasks/main.yml | 17
-rw-r--r--  ansible/roles/common/handlers/main.yml | 7
-rw-r--r--  ansible/roles/common/tasks/main.yml | 15
-rw-r--r--  ansible/roles/env_validate/tasks/main.yml | 8
-rw-r--r--  ansible/roles/frontend/tasks/main.yml | 120
-rw-r--r--  ansible/roles/frontend/templates/settings_prod.py | 18
-rw-r--r--  ansible/roles/frontend/vars/main.yml | 2
-rw-r--r--  ansible/roles/jenkins-config-git/tasks/main.yml | 51
-rw-r--r--  ansible/roles/jenkins-config-skeleton/files/jenkins-config/jobs/blank/config.xml | 40
-rw-r--r--  ansible/roles/jenkins-config-skeleton/files/jenkins-config/users/admin/config.xml | 24
-rw-r--r--  ansible/roles/jenkins-config-skeleton/handlers/main.yml | 7
-rw-r--r--  ansible/roles/jenkins-config-skeleton/tasks/main.yml | 57
-rw-r--r--  ansible/roles/jenkins-config-skeleton/templates/jenkins-config/config.xml | 109
-rw-r--r--  ansible/roles/jenkins-config-skeleton/templates/jenkins-config/hudson.tasks.Mailer.xml | 15
-rw-r--r--  ansible/roles/jenkins-cronjobs/tasks/main.yml | 28
-rw-r--r--  ansible/roles/jenkins-linaro-theme/tasks/main.yml | 16
-rw-r--r--  ansible/roles/jenkins-linaro-theme/templates/org.codefirst.SimpleThemeDecorator.xml | 5
-rw-r--r--  ansible/roles/jenkins-ssh-publish/tasks/main.yml | 18
-rw-r--r--  ansible/roles/jenkins-ssh-publish/templates/jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml | 87
-rw-r--r--  ansible/roles/jenkins-user/tasks/main.yml | 8
-rw-r--r--  ansible/roles/jenkins-user/tasks/templates/jenkins-config/users/frontend/config.xml | 23
-rw-r--r--  ansible/roles/jenkins-utils-cron-disable/tasks/main.yml | 6
-rw-r--r--  ansible/roles/jenkins-utils-publish-staging/tasks/main.yml | 8
-rw-r--r--  ansible/roles/jenkins-utils-set-jenkins-url/tasks/main.yml | 12
-rw-r--r--  ansible/roles/jenkins/handlers/main.yml | 7
-rw-r--r--  ansible/roles/jenkins/tasks/jenkins-pkgs.yml | 44
-rw-r--r--  ansible/roles/jenkins/tasks/jenkins-plugins.yml | 32
-rw-r--r--  ansible/roles/jenkins/tasks/main.yml | 2
-rw-r--r--  ansible/roles/jenkins/vars/main.yml | 63
-rw-r--r--  ansible/roles/linaro-jenkins-tools/tasks/main.yml | 13
-rw-r--r--  ansible/roles/new-publish/files/ssh_config_1 | 10
-rw-r--r--  ansible/roles/new-publish/tasks/main.yml | 87
-rw-r--r--  ansible/roles/new-publish/templates/publish-copy.j2 | 1
-rw-r--r--  ansible/roles/new-publish/templates/publish-trigger.j2 | 1
-rw-r--r--  ansible/roles/new-publish/vars/main.yml | 5
-rw-r--r--  ansible/roles/publishing/templates/publish-copy.j2 | 1
-rw-r--r--  ansible/roles/publishing/templates/publish-trigger.j2 | 1
-rw-r--r--  ansible/roles/reposeed/tasks/main.yml | 8
-rw-r--r--  ansible/roles/squid/files/squid.conf | 7165
-rw-r--r--  ansible/roles/squid/files/squid.conf.org.diff | 41
-rw-r--r--  ansible/roles/squid/tasks/main.yml | 11
-rw-r--r--  ansible/roles/sslcert/tasks/main.yml | 8
-rw-r--r--  ansible/roles/sslcert/tasks/production.yml | 10
-rw-r--r--  ansible/roles/sslcert/tasks/self-signed.yml | 14
-rw-r--r--  ansible/roles/sslcert/templates/ssleay.conf | 9
l---------  ansible/secrets | 1
-rw-r--r--  ansible/site.yml | 5
-rw-r--r--  ansible/support/jenkins_plugins2ansible.py | 26
-rwxr-xr-x  ansible/update-production-jenkins.sh | 28
-rw-r--r--  ansible/vars/empty.yml | 2
82 files changed, 0 insertions, 9381 deletions
diff --git a/ansible/.gitignore b/ansible/.gitignore
deleted file mode 100644
index 7d9478c..0000000
--- a/ansible/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.pyc
-.vagrant
diff --git a/ansible/README b/ansible/README
deleted file mode 100644
index 7d76baa..0000000
--- a/ansible/README
+++ /dev/null
@@ -1,90 +0,0 @@
-This directory contains proof-of-concept scripts for deploying the Jenkins &
-Android Build infrastructure using Ansible (http://www.ansibleworks.com/).
-
-Development deployment
-----------------------
-
-To deploy a development instance of a particular Jenkins service to a locally
-running Vagrant virtual machine, first select the "hosts-devel-*" file
-corresponding to that service. Suppose we want to deploy the "tcwg-ci"
-service; we will thus use the hosts-devel-tcwg-ci inventory file.
-
-0. Install Vagrant and import the "trusty32" box:
-
-With a newer version of Vagrant:
-
- vagrant box add ubuntu/trusty32
-
-With an older version of Vagrant:
-
- vagrant box add trusty32 https://vagrantcloud.com/ubuntu/trusty32/version/1/provider/virtualbox.box
-
-1. Create a new VM with:
-
- vagrant up
-
-You may want to check that you can log in to the VM (no host key conflicts, etc.):
-
- ssh -p2222 vagrant@localhost
-
-You can skip this the first time, but if you get early SSH errors when running
-Ansible in the following steps, try the command above to diagnose them.
-
-2. Check which hosts are targeted by the deployment process:
-
- ansible-playbook -i hosts-devel-tcwg-ci -l "tcwg-ci:&vagrant" site.yml --list-hosts
-
- '-l "tcwg-ci:&vagrant"' means "limit operations to hosts which are in both
- the 'tcwg-ci' and 'vagrant' groups".
-
-Verify that this is consistent with your expectations. For the Vagrant case,
-that should be localhost.
-
-3. Deploy infrastructure:
-
- ansible-playbook -i hosts-devel-tcwg-ci -l "tcwg-ci:&vagrant" site.yml
-
-Add -v for verbosity; repeat it for more detail. Use the other *.yml playbooks
-to deploy just parts of the infrastructure (e.g. jenkins.yml will deploy just
-the Jenkins service with its dependencies, such as Apache).
-
-4. Once deployment is done, Jenkins can be accessed at https://localhost:6443/ .
-Admin access: admin:admin.
-
-
-Setting up Jenkins master from production config backup
--------------------------------------------------------
-The above approach creates the Jenkins config from a set of templates and
-high-level configuration parameters. That is enough to bootstrap a new
-ad-hoc server, but it doesn't scale to cloning existing high-volume
-production servers like ci.linaro.org. For those, using a config backup
-from the existing server is (so far) the more viable approach. Of course,
-this raises questions of idempotency and declarative deployment
-specification, as well as the security of the resulting deployment. It is
-a known issue where formal configuration management clashes with imperfect
-reality. Such a setup mode therefore has "imperative" parts which run only
-once and are skipped on subsequent runs (if it is detected that they were
-already executed). This raises the question of whether forcing them to
-re-run should be supported - such an operation would be needed regularly
-during initial configuration-management development and later during
-system testing. The current answer is "no", because the price of a mistake
-may be too high for a production server, and because of the concern of an
-explosive growth in the number of parameters. So, currently, if you want to
-redeploy the config for such a setup from git, you need to ssh to the
-server and remove the entire old config yourself.
-
-Currently, the "from production backup" mode is the default for production hosts.
-But as a new production host starts out as a staging host, this can be overridden:
-
-ansible-playbook -i hosts-devel-ci -l "ci:&ec2" jenkins.yml -e jenkins_config_from_backup=true
-
-
-Managing production server
---------------------------
-
-To simplify management of the production server(s), helper scripts which execute
-a specific set of tasks (using Ansible tags, etc.) are provided:
-
-- update-production-jenkins.sh
-
-Please refer to individual scripts for more info.
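For reference, the development deployment described above condenses to the
following shell session (all commands are taken from the steps above; the
tcwg-ci service is just the example used there):

  vagrant box add ubuntu/trusty32   # or the explicit-URL form for older Vagrant
  vagrant up
  ssh -p2222 vagrant@localhost      # optional sanity check of SSH access
  ansible-playbook -i hosts-devel-tcwg-ci -l "tcwg-ci:&vagrant" site.yml --list-hosts
  ansible-playbook -i hosts-devel-tcwg-ci -l "tcwg-ci:&vagrant" site.yml   # add -v for verbosity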
diff --git a/ansible/Vagrantfile b/ansible/Vagrantfile
deleted file mode 100644
index 475fbc1..0000000
--- a/ansible/Vagrantfile
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-#
-# This is vagrant config file for local VM setup of android-build
-#
-
-Vagrant::Config.run do |config|
- # All Vagrant configuration is done here. The most common configuration
- # options are documented and commented below. For a complete reference,
- # please see the online documentation at vagrantup.com.
-
- # Every Vagrant virtual environment requires a box to build off of.
- config.vm.box = "trusty32"
-
- # The url from where the 'config.vm.box' box will be fetched if it
- # doesn't already exist on the user's system.
- # config.vm.box_url = "http://domain.com/path/to/above.box"
-
- # Boot with a GUI so you can see the screen. (Default is headless)
- config.vm.boot_mode = :gui
-
- # Assign this VM to a host-only network IP, allowing you to access it
- # via the IP. Host-only networks can talk to the host machine as well as
- # any other machines on the same network, but cannot be accessed (through this
- # network interface) by any external networks.
- # config.vm.network :hostonly, "192.168.33.10"
-
- # Assign this VM to a bridged network, allowing you to connect directly to a
- # network using the host's network device. This makes the VM appear as another
- # physical device on your network.
- # config.vm.network :bridged
-
- # Forward a port from the guest to the host, which allows for outside
- # computers to access the VM, whereas host only networking does not.
- config.vm.forward_port 80, 6080
- config.vm.forward_port 443, 6443
-
- # Share an additional folder to the guest VM. The first argument is
- # an identifier, the second is the path on the guest to mount the
- # folder, and the third is the path on the host to the actual folder.
- # config.vm.share_folder "v-data", "/vagrant_data", "../data"
-
- # Enable provisioning with Puppet stand alone. Puppet manifests
- # are contained in a directory path relative to this Vagrantfile.
- # You will need to create the manifests directory and a manifest in
- # the file base.pp in the manifests_path directory.
- #
- # An example Puppet manifest to provision the message of the day:
- #
- # # group { "puppet":
- # # ensure => "present",
- # # }
- # #
- # # File { owner => 0, group => 0, mode => 0644 }
- # #
- # # file { '/etc/motd':
- # # content => "Welcome to your Vagrant-built virtual machine!
- # # Managed by Puppet.\n"
- # # }
- #
- # config.vm.provision :puppet do |puppet|
- # puppet.manifests_path = "manifests"
- # puppet.manifest_file = "base.pp"
- # end
-
- # Enable provisioning with chef solo, specifying a cookbooks path, roles
- # path, and data_bags path (all relative to this Vagrantfile), and adding
- # some recipes and/or roles.
- #
- # config.vm.provision :chef_solo do |chef|
- # chef.cookbooks_path = "../my-recipes/cookbooks"
- # chef.roles_path = "../my-recipes/roles"
- # chef.data_bags_path = "../my-recipes/data_bags"
- # chef.add_recipe "mysql"
- # chef.add_role "web"
- #
- # # You may also specify custom JSON attributes:
- # chef.json = { :mysql_password => "foo" }
- # end
-
- # Enable provisioning with chef server, specifying the chef server URL,
- # and the path to the validation key (relative to this Vagrantfile).
- #
- # The Opscode Platform uses HTTPS. Substitute your organization for
- # ORGNAME in the URL and validation key.
- #
- # If you have your own Chef Server, use the appropriate URL, which may be
- # HTTP instead of HTTPS depending on your configuration. Also change the
- # validation key to validation.pem.
- #
- # config.vm.provision :chef_client do |chef|
- # chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
- # chef.validation_key_path = "ORGNAME-validator.pem"
- # end
- #
- # If you're using the Opscode platform, your validator client is
- # ORGNAME-validator, replacing ORGNAME with your organization name.
- #
- # IF you have your own Chef Server, the default validation client name is
- # chef-validator, unless you changed the configuration.
- #
- # chef.validation_client_name = "ORGNAME-validator"
-end
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
deleted file mode 100644
index 8ba8582..0000000
--- a/ansible/ansible.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[ssh_connection]
-ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes
diff --git a/ansible/files/android-build.linaro.org.conf b/ansible/files/android-build.linaro.org.conf
deleted file mode 100644
index 84cf797..0000000
--- a/ansible/files/android-build.linaro.org.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-<VirtualHost *:443>
- ServerAdmin webmaster@localhost
- ServerName android-build.linaro.org
- <Proxy *>
- Order deny,allow
- Allow from all
- </Proxy>
-
- RewriteEngine on
- RewriteRule ^/builds/~([a-z][-a-z0-9]+)/([-A-Za-z0-9_.]+)/([0-9]+)/output(.*) http://127.0.0.1:600/$1_$2/builds/$3/archive$4 [L,P]
- RewriteRule ^/builds/~([a-z][-a-z0-9]+)/([-A-Za-z0-9_.]+)/lastSuccessful/output(.*) http://127.0.0.1:600/$1_$2/lastSuccessful/archive$3 [L,P]
- RewriteRule ^/mockup(.*) $1 [R=301]
-
- ProxyPass /jenkins http://localhost:8080/jenkins
- ProxyPassReverse /jenkins http://localhost:8080/jenkins
-
-<Directory /home/build-system-frontend>
- <IfVersion < 2.3 >
- Order allow,deny
- Allow from all
- </IfVersion>
- <IfVersion >= 2.3>
- Require all granted
- </IfVersion>
-</Directory>
-
- Alias /static /home/build-system-frontend/frontend/static
- Alias /3.3.0/build /home/build-system-frontend/yui/build
-
- WSGIScriptAlias /combo /home/build-system-frontend/lazr-js/combo.wsgi
- WSGIScriptAlias / /home/build-system-frontend/frontend/linaro-abs-frontend.wsgi
-
- <LocationMatch "/(3.3.0/build|combo)">
- SetOutputFilter DEFLATE
- FileETag none
- ExpiresActive on
- ExpiresDefault "access plus 10 years"
- Header append Cache-Control "public"
- </LocationMatch>
-
- SSLEngine on
- SSLCertificateFile {{ssl_cert}}
- SSLCertificateKeyFile {{ssl_key}}
- SSLCACertificateFile /etc/ssl/certs/gd_bundle.crt
-</VirtualHost>
-
-<VirtualHost 127.0.0.1:600>
- ServerAdmin webmaster@localhost
- ServerName android-build.linaro.org
- DocumentRoot /var/lib/jenkins/jobs
-</VirtualHost>
-
-<VirtualHost *:80>
- ServerAdmin webmaster@localhost
- ServerName android-build.linaro.org
- Alias /download/ /var/lib/jenkins/jobs/
- # Provide shortcut URLs to access artifacts
- AliasMatch ^/builds/~([^/]+)/([^/]+)/(lastStable|lastSuccessful)(.*) /var/lib/jenkins/jobs/$1_$2/$3/archive/build/out$4
- AliasMatch ^/builds/~([^/]+)/([^/]+)/([^/]+)(.*) /var/lib/jenkins/jobs/$1_$2/builds/$3/archive/build/out$4
- <Directory "/var/lib/jenkins/jobs/">
- Options Indexes MultiViews FollowSymLinks
- AllowOverride None
- </Directory>
- Alias /seed/ /mnt2/seed/
- <Directory "/mnt2/seed/">
- Options Indexes MultiViews FollowSymLinks
- AllowOverride None
- Require all granted
-# Order deny,allow
-# Deny from all
-# Allow from 127.0.0.0/8 ::1/128 10.0.0.0/8
- </Directory>
-
-# RewriteLog /var/log/apache2/mod_rewrite_log
-# RewriteLogLevel 3
-
- RewriteEngine on
- RewriteCond %{REQUEST_URI} !^/(download|seed|builds)
- RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [redirect=301,last]
- # Allow to access everything in /builds/*
- RewriteCond %{REQUEST_URI} ^/builds/
- RewriteRule .* - [last]
- # Allow to access XMLs in build artifact archives
- RewriteCond %{REQUEST_URI} .+/archive/.+\.xml$
- RewriteRule .* - [last]
- # But disallow access any other XMLs (e.g. configs)
- RewriteCond %{REQUEST_URI} .+\.xml$
- RewriteRule .* - [forbidden]
-</VirtualHost>
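To illustrate the per-user build rewrites in the HTTPS vhost above: a request
such as the following (the user "jdoe", job "my-build" and build number 42 are
made-up values) gets proxied to the internal port-600 vhost, which serves the
Jenkins job archives directly:

  #  /builds/~jdoe/my-build/42/output/boot.img
  #    -> http://127.0.0.1:600/jdoe_my-build/builds/42/archive/boot.img
  curl -kI "https://android-build.linaro.org/builds/~jdoe/my-build/42/output/boot.img"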
diff --git a/ansible/files/gd_bundle.crt b/ansible/files/gd_bundle.crt
deleted file mode 100644
index 9aa63ce..0000000
--- a/ansible/files/gd_bundle.crt
+++ /dev/null
@@ -1,53 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIE3jCCA8agAwIBAgICAwEwDQYJKoZIhvcNAQEFBQAwYzELMAkGA1UEBhMCVVMx
-ITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g
-RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMTYw
-MTU0MzdaFw0yNjExMTYwMTU0MzdaMIHKMQswCQYDVQQGEwJVUzEQMA4GA1UECBMH
-QXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTEaMBgGA1UEChMRR29EYWRkeS5j
-b20sIEluYy4xMzAxBgNVBAsTKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5j
-b20vcmVwb3NpdG9yeTEwMC4GA1UEAxMnR28gRGFkZHkgU2VjdXJlIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5MREwDwYDVQQFEwgwNzk2OTI4NzCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMQt1RWMnCZM7DI161+4WQFapmGBWTtwY6vj3D3H
-KrjJM9N55DrtPDAjhI6zMBS2sofDPZVUBJ7fmd0LJR4h3mUpfjWoqVTr9vcyOdQm
-VZWt7/v+WIbXnvQAjYwqDL1CBM6nPwT27oDyqu9SoWlm2r4arV3aLGbqGmu75RpR
-SgAvSMeYddi5Kcju+GZtCpyz8/x4fKL4o/K1w/O5epHBp+YlLpyo7RJlbmr2EkRT
-cDCVw5wrWCs9CHRK8r5RsL+H0EwnWGu1NcWdrxcx+AuP7q2BNgWJCJjPOq8lh8BJ
-6qf9Z/dFjpfMFDniNoW1fho3/Rb2cRGadDAW/hOUoz+EDU8CAwEAAaOCATIwggEu
-MB0GA1UdDgQWBBT9rGEyk2xF1uLuhV+auud2mWjM5zAfBgNVHSMEGDAWgBTSxLDS
-kdRMEXGzYcs9of7dqGrU4zASBgNVHRMBAf8ECDAGAQH/AgEAMDMGCCsGAQUFBwEB
-BCcwJTAjBggrBgEFBQcwAYYXaHR0cDovL29jc3AuZ29kYWRkeS5jb20wRgYDVR0f
-BD8wPTA7oDmgN4Y1aHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9yZXBv
-c2l0b3J5L2dkcm9vdC5jcmwwSwYDVR0gBEQwQjBABgRVHSAAMDgwNgYIKwYBBQUH
-AgEWKmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeTAO
-BgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBANKGwOy9+aG2Z+5mC6IG
-OgRQjhVyrEp0lVPLN8tESe8HkGsz2ZbwlFalEzAFPIUyIXvJxwqoJKSQ3kbTJSMU
-A2fCENZvD117esyfxVgqwcSeIaha86ykRvOe5GPLL5CkKSkB2XIsKd83ASe8T+5o
-0yGPwLPk9Qnt0hCqU7S+8MxZC9Y7lhyVJEnfzuz9p0iRFEUOOjZv2kWzRaJBydTX
-RE4+uXR21aITVSzGh6O1mawGhId/dQb8vxRMDsxuxN89txJx9OjxUUAiKEngHUuH
-qDTMBqLdElrRhjZkAzVvb3du6/KFUJheqwNTrZEjYx8WnM25sgVjOuH0aBsXBTWV
-U+4=
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
-MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
-YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
-MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
-ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
-MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
-ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
-PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
-wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
-EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
-avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
-YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
-sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
-/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
-IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
-YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
-ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
-OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
-TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
-HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
-dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
-ReYNnyicsbkqWletNw+vHX/bvZ8=
------END CERTIFICATE-----
diff --git a/ansible/files/jenkins.conf b/ansible/files/jenkins.conf
deleted file mode 100644
index 61e9f1f..0000000
--- a/ansible/files/jenkins.conf
+++ /dev/null
@@ -1,48 +0,0 @@
-<VirtualHost *:80>
- ServerAdmin webmaster@localhost
- ServerName {{site_name}}
-# Having >1 ServerName may cause problems with SNI, which may cause problems
-# with Java 7, which in turn may cause problems with jenkins-cli.jar, which we
-# want to use.
-# ServerName {{inventory_hostname}}
- ProxyRequests Off
- <Proxy *>
- Order deny,allow
- Allow from all
- </Proxy>
- ProxyPreserveHost on
- ProxyPass / http://localhost:{{jenkins_port}}/
-
- RewriteEngine on
- ReWriteCond %{SERVER_PORT} !^443$
- RewriteRule ^/(.*) https://%{HTTP_HOST}/$1 [NC,R,L]
-</VirtualHost>
-
-<VirtualHost *:443>
- ServerAdmin webmaster@localhost
- ServerName {{site_name}}
-# ServerName {{inventory_hostname}}
- ProxyRequests Off
- <Proxy *>
- Order deny,allow
- Allow from all
- </Proxy>
-
-{% if hosttype == "ci" %}
- # Compatibility redirect for ci.linaro.org
- RewriteEngine on
- RewriteRule ^/jenkins/(.*) https://%{HTTP_HOST}/$1 [R,L]
-{% endif %}
-
- # See https://wiki.jenkins-ci.org/display/JENKINS/Running+Jenkins+behind+Apache
- AllowEncodedSlashes NoDecode
- ProxyPreserveHost off
- # Note: Jenkins and Apache URL prefixes must match
- ProxyPass {{jenkins_prefix}} http://localhost:{{jenkins_port}}{{jenkins_prefix}} nocanon
- ProxyPassReverse {{jenkins_prefix}} http://localhost:{{jenkins_port}}{{jenkins_prefix}}
-
- SSLEngine on
- SSLCertificateFile {{ssl_cert}}
- SSLCertificateKeyFile {{ssl_key}}
- SSLCACertificateFile /etc/ssl/certs/gd_bundle.crt
-</VirtualHost>
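On a Vagrant development instance the two vhosts above are reachable through
the forwarded ports from the Vagrantfile (80 -> 6080, 443 -> 6443), so a rough
smoke test of the Apache-to-Jenkins proxying is:

  curl -I  http://localhost:6080/    # should answer with a redirect to HTTPS
  curl -kI https://localhost:6443/   # -k because the devel certificate is self-signed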
diff --git a/ansible/files/jenkins_slaves.xml.tcwg-ci b/ansible/files/jenkins_slaves.xml.tcwg-ci
deleted file mode 100644
index 7c634c6..0000000
--- a/ansible/files/jenkins_slaves.xml.tcwg-ci
+++ /dev/null
@@ -1,320 +0,0 @@
- <slave>
- <name>x86_64-01</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41001</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>x86_64-02</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41002</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>x86_64-03</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41003</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>x86_64-04</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41004</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>x86_64-05</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41005</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>x86_64-06</name>
- <description>LAVA Lab x86 TCWG Build Server</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41006</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>x86_64 tcwg</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-01</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40001</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-02</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40002</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-03</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40003</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-04</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40004</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-05</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40005</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-06</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40006</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-07</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40007</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a15-08</name>
- <description>LAVA Lab ARM A15 Chromebook</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>40008</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a15 precise</label>
- <nodeProperties/>
- <userId>tyler.baker@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-01</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41007</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>alan.bennett@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-02</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41008</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>rob.savoye@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-03</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41009</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>rob.savoye@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-04</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41010</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>rob.savoye@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-05</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41011</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>rob.savoye@linaro.org</userId>
- </slave>
- <slave>
- <name>cortex-a8-06</name>
- <description>LAVA Lab ARM A8 Beaglebone Black</description>
- <remoteFS>/home/buildslave</remoteFS>
- <numExecutors>1</numExecutors>
- <mode>NORMAL</mode>
- <retentionStrategy class="hudson.slaves.RetentionStrategy$Always"/>
- <launcher class="hudson.plugins.sshslaves.SSHLauncher" plugin="ssh-slaves@1.5">
- <host>lab.validation.linaro.org</host>
- <port>41012</port>
- <credentialsId>6d1cef13-3c99-4564-93bc-60aa2bea21ab</credentialsId>
- </launcher>
- <label>arm a8 precise</label>
- <nodeProperties/>
- <userId>rob.savoye@linaro.org</userId>
- </slave>
diff --git a/ansible/files/jenkins_users.xml b/ansible/files/jenkins_users.xml
deleted file mode 100644
index c3c4cb5..0000000
--- a/ansible/files/jenkins_users.xml
+++ /dev/null
@@ -1,6 +0,0 @@
- <permission>hudson.model.Hudson.Read:frontend</permission>
- <permission>hudson.model.Item.Build:frontend</permission>
- <permission>hudson.model.Item.Configure:frontend</permission>
- <permission>hudson.model.Item.Create:frontend</permission>
- <permission>hudson.model.Item.Delete:frontend</permission>
- <permission>hudson.model.Item.Read:frontend</permission>
diff --git a/ansible/filter_plugins/custom_plugins.py b/ansible/filter_plugins/custom_plugins.py
deleted file mode 100644
index 45a2031..0000000
--- a/ansible/filter_plugins/custom_plugins.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import hashlib
-
-
-class FilterModule(object):
- ''' Custom filters are loaded by FilterModule objects '''
-
- def filters(self):
- return {
- 'jenkins_hash': self.jenkins_hash,
- }
-
- def jenkins_hash(self, value):
- # TODO: generate salt randomly
- salt = "salt"
- h = hashlib.sha256()
- h.update("%s{%s}" % (value, salt))
- return salt + ":" + h.hexdigest()
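This filter produces the salt:sha256 strings stored in Jenkins <passwordHash>
elements (compare the admin user's config.xml further down, which uses the same
layout with a different salt). A rough way to reproduce its output outside
Ansible, using the hard-coded "salt" value and a made-up password "admin":

  python -c 'import hashlib; print("salt:" + hashlib.sha256(b"admin{salt}").hexdigest())'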
diff --git a/ansible/frontend.yml b/ansible/frontend.yml
deleted file mode 100644
index 8f116c3..0000000
--- a/ansible/frontend.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: android-build
- gather_facts: no
- vars:
- - linaro_android_frontend_repo: http://git.linaro.org/git/infrastructure/linaro-android-frontend.git
- - linaro_android_frontend_rev: HEAD
- vars_files:
- - ["{{private_vars}}", "vars/empty.yml"]
- roles:
- - common
- - {role: jenkins-user, when: not jenkins_config_from_backup}
- - frontend
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
deleted file mode 100644
index 127ac9f..0000000
--- a/ansible/group_vars/all
+++ /dev/null
@@ -1,48 +0,0 @@
-# These are global variables, applying to every play
----
-ansible_ssh_port: 22
-
-# We could reference secrets outside of the playbook dir for most things,
-# except for templates, see https://github.com/ansible/ansible/issues/7106
-# So we have to make a symlink inside the playbook dir and reference it that way.
-#secrets_dir: ../../ansible-secrets-jenkins
-secrets_dir: secrets
-
-# The path is relative to roles' main.yml file. Do not use in non-role tasks!
-# TODO: Ansible really should provide var for top-level playbook dir
-cred_store: ../../../{{secrets_dir}}/generated/{{inventory_hostname}}_{{ansible_ssh_port}}
-
-# Production/development setup type
-production: no
-
-# File holding generic private variables
-private_vars: "{{secrets_dir}}/main.yml"
-
-# By default, generate self-signed SSL certificate
-ssl_cert_self_signed: yes
-
-# Whether to take the Jenkins config from a backup repository, or
-# instantiate it from templates, based on the high-level options below.
-# Default is yes for production hosts, no otherwise.
-jenkins_config_from_backup: "{{production}}"
-
-## Settings below mostly take effect only if "jenkins_config_from_backup: no"
-
-# Jenkins setup is by default non-private - override as needed
-jenkins_private: no
-
-# "native" or "crowd". Crowd requires private credentials.
-jenkins_auth: native
-
-jenkins_managed_config: yes
-# It's generally insecure to execute jobs on the master, so the default is 0
-jenkins_master_executors: 0
-
-# Whether to set up Jenkins SSH publishing. Requires private credentials.
-jenkins_setup_ssh_publish: no
-
-# Jenkins will be accessible at this absolute URL. This is the server root by default,
-# unless some kind of frontend is running
-jenkins_prefix: /
-
-jenkins_url: https://{{site_name}}{{jenkins_prefix}}
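Any of the defaults above can be overridden per group or host (as the
group_vars files below do), or for a single run with -e on the command line,
which takes the highest precedence. For example, to allow two executors on the
master of a devel instance (inventory and limit as in the README example; this
only matters when the config is generated from templates rather than restored
from a backup):

  ansible-playbook -i hosts-devel-tcwg-ci -l "tcwg-ci:&vagrant" jenkins.yml \
      -e jenkins_master_executors=2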
diff --git a/ansible/group_vars/android-build b/ansible/group_vars/android-build
deleted file mode 100644
index 189c535..0000000
--- a/ansible/group_vars/android-build
+++ /dev/null
@@ -1,7 +0,0 @@
-site_name: android-build.linaro.org
-hosttype: android-build
-
-jenkins_private: no
-#jenkins_reply_to: rdk-project@lists.linaro.org
-
-jenkins_prefix: /jenkins
diff --git a/ansible/group_vars/ci b/ansible/group_vars/ci
deleted file mode 100644
index 7095db7..0000000
--- a/ansible/group_vars/ci
+++ /dev/null
@@ -1,5 +0,0 @@
-site_name: ci.linaro.org
-hosttype: ci
-
-jenkins_private: no
-#jenkins_reply_to: rdk-project@lists.linaro.org
diff --git a/ansible/group_vars/ec2 b/ansible/group_vars/ec2
deleted file mode 100644
index 403dd87..0000000
--- a/ansible/group_vars/ec2
+++ /dev/null
@@ -1,5 +0,0 @@
-# Production instances now use real usernames, so use --user=
-# ansible_ssh_user: ubuntu
-
-# Workaround for Ansible not liking empty var files
-_empty: _empty
diff --git a/ansible/group_vars/production b/ansible/group_vars/production
deleted file mode 100644
index 8981625..0000000
--- a/ansible/group_vars/production
+++ /dev/null
@@ -1,5 +0,0 @@
-production: yes
-jenkins_managed_config: no
-jenkins_auth: crowd
-jenkins_setup_ssh_publish: yes
-ssl_cert_self_signed: no
diff --git a/ansible/group_vars/rdk-ci b/ansible/group_vars/rdk-ci
deleted file mode 100644
index 4fabb43..0000000
--- a/ansible/group_vars/rdk-ci
+++ /dev/null
@@ -1,5 +0,0 @@
-site_name: rdk.ci.linaro.org
-hosttype: rdk-ci
-
-jenkins_private: yes
-jenkins_reply_to: rdk-project@lists.linaro.org
diff --git a/ansible/group_vars/tcwg-ci b/ansible/group_vars/tcwg-ci
deleted file mode 100644
index 44525bd..0000000
--- a/ansible/group_vars/tcwg-ci
+++ /dev/null
@@ -1,7 +0,0 @@
-site_name: tcwg.ci.linaro.org
-hosttype: tcwg-ci
-
-jenkins_private: yes
-jenkins_slaves_file: files/jenkins_slaves.xml.tcwg-ci
-jenkins_ssh_publishers_file: "{{secrets_dir}}/jenkins_ssh_publishers.xml.tcwg-ci"
-jenkins_master_executors: 2
diff --git a/ansible/group_vars/vagrant b/ansible/group_vars/vagrant
deleted file mode 100644
index eefdef3..0000000
--- a/ansible/group_vars/vagrant
+++ /dev/null
@@ -1,3 +0,0 @@
-ansible_ssh_port: 2222
-ansible_ssh_user: vagrant
-ansible_ssh_pass: vagrant
diff --git a/ansible/host_vars/ec2-devel-host b/ansible/host_vars/ec2-devel-host
deleted file mode 100644
index b58dbd0..0000000
--- a/ansible/host_vars/ec2-devel-host
+++ /dev/null
@@ -1 +0,0 @@
-ansible_ssh_host: ec2-54-237-25-17.compute-1.amazonaws.com
diff --git a/ansible/host_vars/tcwg.ci.linaro.org b/ansible/host_vars/tcwg.ci.linaro.org
deleted file mode 100644
index 2c5efe2..0000000
--- a/ansible/host_vars/tcwg.ci.linaro.org
+++ /dev/null
@@ -1,5 +0,0 @@
-# Until DNS is set up
-#ansible_ssh_host: 50.19.223.142
-
-# Block device to use for Jenkins jobs storage
-job_store_dev: /dev/xvdc
diff --git a/ansible/hosts-devel-android-build b/ansible/hosts-devel-android-build
deleted file mode 100644
index 2e072e5..0000000
--- a/ansible/hosts-devel-android-build
+++ /dev/null
@@ -1,19 +0,0 @@
-[vagrant]
-localhost:2222
-
-[ec2-devel]
-# Virtual host name; the actual host is assigned via host_vars
-ec2-devel-host
-
-[ec2:children]
-ec2-devel
-
-[devel:children]
-vagrant
-ec2-devel
-
-[android-build:children]
-devel
-
-[jenkins-generic:children]
-android-build
diff --git a/ansible/hosts-devel-ci b/ansible/hosts-devel-ci
deleted file mode 100644
index 17ac75d..0000000
--- a/ansible/hosts-devel-ci
+++ /dev/null
@@ -1,19 +0,0 @@
-[vagrant]
-localhost:2222
-
-[ec2-devel]
-# Virtual host name; the actual host is assigned via host_vars
-ec2-devel-host
-
-[ec2:children]
-ec2-devel
-
-[devel:children]
-vagrant
-ec2-devel
-
-[ci:children]
-devel
-
-[jenkins-generic:children]
-ci
diff --git a/ansible/hosts-devel-rdk-ci b/ansible/hosts-devel-rdk-ci
deleted file mode 100644
index 0a67e9f..0000000
--- a/ansible/hosts-devel-rdk-ci
+++ /dev/null
@@ -1,19 +0,0 @@
-[vagrant]
-localhost:2222
-
-[ec2-devel]
-# Virtual host name; the actual host is assigned via host_vars
-ec2-devel-host
-
-[ec2:children]
-ec2-devel
-
-[devel:children]
-vagrant
-ec2-devel
-
-[rdk-ci:children]
-devel
-
-[jenkins-generic:children]
-rdk-ci
diff --git a/ansible/hosts-devel-tcwg-ci b/ansible/hosts-devel-tcwg-ci
deleted file mode 100644
index c9b3e25..0000000
--- a/ansible/hosts-devel-tcwg-ci
+++ /dev/null
@@ -1,19 +0,0 @@
-[vagrant]
-localhost:2222
-
-[ec2-devel]
-# Virtual host name; the actual host is assigned via host_vars
-ec2-devel-host
-
-[ec2:children]
-ec2-devel
-
-[devel:children]
-vagrant
-ec2-devel
-
-[tcwg-ci:children]
-devel
-
-[jenkins-generic:children]
-tcwg-ci
diff --git a/ansible/hosts-prod b/ansible/hosts-prod
deleted file mode 100644
index 3cd4ea0..0000000
--- a/ansible/hosts-prod
+++ /dev/null
@@ -1,26 +0,0 @@
-[android-build]
-android-build.linaro.org
-
-[ci]
-ci.linaro.org
-
-[rdk-ci]
-rdk.ci.linaro.org
-
-[tcwg-ci]
-tcwg.ci.linaro.org
-
-# All individual services are based on jenkins-generic
-[jenkins-generic:children]
-android-build
-ci
-rdk-ci
-tcwg-ci
-
-# Superset of all services
-[production:children]
-jenkins-generic
-
-# All production sites are ec2
-[ec2:children]
-production
diff --git a/ansible/jenkins-ec2-dirs.yml b/ansible/jenkins-ec2-dirs.yml
deleted file mode 100644
index 623df0b..0000000
--- a/ansible/jenkins-ec2-dirs.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# This play sets up production Jenkins partition/directory structure
----
-- hosts: production:&ec2
- gather_facts: no
- tasks:
- - name: Prepare jobs volume mount point
- file: state=directory dest=/mnt2
- sudo: yes
- - name: Add jobs volume mount point to /etc/fstab
- mount: state=mounted src={{job_store_dev}} name=/mnt2 fstype=auto passno=2
- sudo: yes
- - name: Prepare jobs volume directory structure
- file: state=directory dest=/mnt2/jenkins/jobs
- sudo: yes
- - name: Prepare Jenkins jobs bind mount point
- file: state=directory dest=/var/lib/jenkins/jobs
- sudo: yes
- - name: Add Jenkins jobs bind mount point to /etc/fstab
- mount: state=mounted src=/mnt2/jenkins/jobs name=/var/lib/jenkins/jobs
-         fstype=none opts="defaults,bind" passno=0
- sudo: yes
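The net effect of this play is a jobs volume mounted at /mnt2 with
/mnt2/jenkins/jobs bind-mounted over /var/lib/jenkins/jobs, both recorded in
/etc/fstab. A rough ad-hoc check that the entries landed (using the hosts-prod
inventory):

  ansible 'production:&ec2' -i hosts-prod -m command \
      -a 'grep -E "mnt2|jenkins/jobs" /etc/fstab'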
diff --git a/ansible/jenkins.yml b/ansible/jenkins.yml
deleted file mode 100644
index ea20dd4..0000000
--- a/ansible/jenkins.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- hosts: jenkins-generic
- gather_facts: no
- vars:
- - linaro_android_build_tools_repo: http://git.linaro.org/git/infrastructure/linaro-android-build-tools.git
- - linaro_android_build_tools_rev: HEAD
- - jenkins_version: 1.565.2
- - ssl_cert: /etc/ssl/certs/{{site_name}}.crt
- - ssl_key: /etc/ssl/private/{{site_name}}.key
- - jenkins_extra_users: files/jenkins_users.xml
- vars_files:
- # Include private settings only if they exist - not all modes need
- # them (e.g. jenkins_auth: native).
-# The following construct works as: find the first file which exists and use it.
- - ["{{private_vars}}", "vars/empty.yml"]
- - ["{{secrets_dir}}/host_vars/{{inventory_hostname}}", "vars/empty.yml"]
- roles:
- - env_validate
- - common
- - apache
- - {role: sslcert, tags: ["sslcert"]}
- - jenkins
- - {role: jenkins-config-skeleton, when: not jenkins_config_from_backup}
- - {role: jenkins-config-git, when: jenkins_config_from_backup}
- - {role: jenkins-ssh-publish, when: jenkins_setup_ssh_publish, tags: ['publish']}
- - {role: jenkins-linaro-theme}
- - {role: apache-site, config: "jenkins", when: hosttype != "android-build", tags: ["apache"]}
- - {role: apache-site, config: "android-build.linaro.org", when: hosttype == "android-build", tags: ["apache"]}
- - {role: linaro-jenkins-tools, tags: ['linaro-jenkins-tools']}
- - {role: jenkins-utils-cron-disable}
- - {role: jenkins-utils-publish-staging}
- - {role: jenkins-utils-set-jenkins-url}
- - {role: new-publish, when: hosttype == "android-build", tags: ["new-publish"]}
- - {role: squid, tags: ["squid"]}
- - {role: jenkins-cronjobs, tags: ["cronjob"]}
- tasks:
- - name: Check out linaro-android-build-tools
- git: name={{linaro_android_build_tools_repo}} version={{linaro_android_build_tools_rev}}
-       dest=~/linaro-android-build-tools
- tags:
- - git
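Several roles in this play carry tags (sslcert, apache, publish, git,
new-publish, squid, cronjob, linaro-jenkins-tools), so parts of it can be
re-run without replaying everything. For example, to refresh only the Apache
site config and the SSL certificate handling on the production tcwg-ci host:

  ansible-playbook -i hosts-prod -l tcwg-ci jenkins.yml --tags apache,sslcert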
diff --git a/ansible/mangle-jobs-remote b/ansible/mangle-jobs-remote
deleted file mode 100755
index a81ca3d..0000000
--- a/ansible/mangle-jobs-remote
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-if [ -z "$script" ]; then
- echo "Usage: script=<script> $0 <ansible host selector>"
- exit 1
-fi
-
-extra=""
-
-if [ "$really" == "true" ]; then
- extra="$extra --really"
-fi
-
-
-ansible all "$@" \
-    -m command \
-    -a "./mangle-jobs $script --dir=/var/lib/jenkins $extra chdir=linaro-jenkins-tools/mangle-jobs/"
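Per the usage message, the mangle script to run is passed via the environment
rather than as an argument, and the remaining arguments go straight to the
ad-hoc ansible call. A hypothetical invocation against the production tcwg-ci
host (the script name here is made up):

  script=some-mangle-script really=true ./mangle-jobs-remote -i hosts-prod -l tcwg-ci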
diff --git a/ansible/new_publish.yml b/ansible/new_publish.yml
deleted file mode 100644
index 3edd6f1..0000000
--- a/ansible/new_publish.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Note: older development, not ported to fit with new plays!
----
-- hosts: publishing
- gather_facts: no
- connection: local
- user: root
- vars:
- - root: ''
- - publish_home: $root/mnt/publish
- # not ideal
- - keys_dir: $publish_home
- tasks:
- - name: Create top-level publishing dir
- # Home dir must be owned by root for ssh ChrootDirectory to work
- file: dest=$publish_home state=directory mode=0755 owner=root group=root
- - name: Create publish group
- group: name=publish state=present
- - name: Create publish-copy user
- user: name=publish-copy comment='Publishing - transfer user'
-        group=publish home=$publish_home
-        generate_ssh_key=yes
-        ssh_key_file=$keys_dir/publish-copy
- - name: Create publish-trigger user
- user: name=publish-trigger comment='Publishing - trigger user'
-        group=publish home=$publish_home
-        generate_ssh_key=yes
-        ssh_key_file=$keys_dir/publish-trigger
- - name: Create upload dir
- # Actual uploads will happen to this dir
- # publish-copy should have write access there, publish-trigger
- # generally only read (cleanup can be handled by cronjob)
- file: dest=$publish_home/uploads state=directory mode=0755 owner=publish-copy group=publish
-
-
- - name: Create /etc/ssh/user-authorized-keys/
- file: dest=$root/etc/ssh/user-authorized-keys/ state=directory mode=0755 owner=root group=root
-
- - name: Setup publish-copy user SSH restrictions
- template: src=templates/publish-copy.j2
-            dest=$root/etc/ssh/user-authorized-keys/publish-copy
-            owner=root group=root mode=0644
-
- - name: Setup publish-trigger user SSH restrictions
- template: src=templates/publish-trigger.j2
-            dest=$root/etc/ssh/user-authorized-keys/publish-trigger
-            owner=root group=root mode=0644
diff --git a/ansible/reposeed.yml b/ansible/reposeed.yml
deleted file mode 100644
index af61d92..0000000
--- a/ansible/reposeed.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-- hosts: android-build
- gather_facts: no
- roles:
- - common
- - reposeed
diff --git a/ansible/roles/apache-site/tasks/main.yml b/ansible/roles/apache-site/tasks/main.yml
deleted file mode 100644
index 71b3181..0000000
--- a/ansible/roles/apache-site/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-- name: Install Apache site config
- template: src=files/{{config}}.conf dest=/etc/apache2/sites-available/{{config}}.conf mode=0644 backup=yes
- sudo: yes
- notify:
- - Restart Apache
-- name: Enable site config
- command: a2ensite {{config}} creates=/etc/apache2/sites-enabled/{{config}}
- sudo: yes
- register: result
- changed_when: "'already enabled' not in result.stdout"
- notify:
- - Restart Apache
-- name: Disable OS default site config
- command: a2dissite 000-default
- sudo: yes
- register: result
- changed_when: "'already disabled' not in result.stdout"
- notify:
- - Restart Apache
diff --git a/ansible/roles/apache/tasks/main.yml b/ansible/roles/apache/tasks/main.yml
deleted file mode 100644
index afd886e..0000000
--- a/ansible/roles/apache/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-- name: Install Apache2
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - apache2
- # This is actually needed only for android-build frontend app
- - libapache2-mod-wsgi
-- name: Enable Apache modules
- command: a2enmod {{item}} creates=/etc/apache2/mods-enabled/{{item}}.load
- sudo: yes
- with_items:
- - proxy
- - proxy_http
- - headers
- - rewrite
- - expires
- - ssl
diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml
deleted file mode 100644
index 3dc9a22..0000000
--- a/ansible/roles/common/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: Restart Apache
- service: name=apache2 state=restarted
- sudo: yes
-
-- name: Restart SSHD
- service: name=ssh state=restarted
- sudo: yes
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
deleted file mode 100644
index 3e46c32..0000000
--- a/ansible/roles/common/tasks/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-- name: apt-get update
- apt: update_cache=yes
- sudo: yes
-
-- name: Install base packages
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - bzr
- - git-core
- - gnupg
- - make
- - zsh
- # Needed by ci.linaro.org
- - dput
diff --git a/ansible/roles/env_validate/tasks/main.yml b/ansible/roles/env_validate/tasks/main.yml
deleted file mode 100644
index 51dda3b..0000000
--- a/ansible/roles/env_validate/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-- name: Check for secrets repository
- local_action: stat path={{secrets_dir}}/main.yml
- register: secrets_exist
- when: production == True
-
-- name: Fail if secrets repository needed, but not available
- fail: msg="Production credentials repository not found"
- when: production and secrets_exist.stat.exists == False
diff --git a/ansible/roles/frontend/tasks/main.yml b/ansible/roles/frontend/tasks/main.yml
deleted file mode 100644
index 1d7f94c..0000000
--- a/ansible/roles/frontend/tasks/main.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-- name: Install Frontend package dependencies
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - libapache2-mod-wsgi
- - python-virtualenv
- - python-lxml
- - python-openid
- - tidy
- - unzip
- - python-cssutils
- - python-lxml
- - sqlite3
- # ??
- - python-pycurl
-
-- name: Create frontend user
- user: name=build-system-frontend comment="Android Build Frontend"
- sudo: yes
-
-- name: Create Frontend var dir
- file: state=directory path=/var/lib/linaro-abs-frontend/ owner=www-data
- sudo: yes
-
-- name: Install Frontend/Jenkins auth password
- copy: content={{ lookup('password', cred_store + '/jenkins/frontend') }}
-        dest=/var/lib/linaro-abs-frontend/jenkins-password mode=0640
- sudo_user: www-data
- sudo: yes
- when: not jenkins_config_from_backup
-
-- name: Install Frontend/Jenkins auth password
- copy: content={{jenkins_frontend_passwd}}
-        dest=/var/lib/linaro-abs-frontend/jenkins-password mode=0640
- sudo_user: www-data
- sudo: yes
- when: jenkins_config_from_backup
-
-- name: Check out Frontend
- git: name={{linaro_android_frontend_repo}} version={{linaro_android_frontend_rev}}
-       dest=~/frontend-{{linaro_android_frontend_rev}}
- sudo_user: build-system-frontend
- sudo: yes
- tags:
- - git
-
-- name: Create Frontend current version symlink
- file: state=link src=~/frontend-{{linaro_android_frontend_rev}}/ dest=~/frontend
- sudo_user: build-system-frontend
- sudo: yes
- tags:
- - git
-
-- name: Install Frontend
- shell: cd ~/frontend; make
- sudo_user: build-system-frontend
- sudo: yes
-
-- name: Create production config
- # TODO: actually replace SECRET_KEY
- template: src=settings_prod.py dest=~build-system-frontend/ mode=0640 owner=build-system-frontend group=www-data
- sudo: yes
- notify:
- - Restart Apache
-- name: Create config symlink
- # wart: relative symlinks not supported
- file: state=link src=~/settings_prod.py dest=~/frontend/settings_prod.py
- sudo_user: build-system-frontend
- sudo: yes
-- name: Create DB
- shell: cd ~build-system-frontend/frontend; ./bin/manage syncdb --noinput --settings settings_prod
- sudo_user: www-data
- sudo: yes
- # Fixture is automatically installed by syncdb
- #sudo -u www-data ./bin/manage loaddata --settings settings_prod group-fixture.json
-
-- name: Download YUI3
- get_url: url=http://yui.zenfs.com/releases/yui3/yui_3.3.0.zip dest=~/yui_3.3.0.zip
- sudo_user: build-system-frontend
- sudo: yes
- register: download_yui3
-- name: Extract YUI3
- shell: cd ~; unzip -o -q yui_3.3.0.zip
- sudo_user: build-system-frontend
- sudo: yes
- when: download_yui3.changed
-- name: Checkout patched lazr-js
- bzr: name=lp:~mwhudson/lazr-js/combo-mod_wsgi-config dest=~/lazr-js
- # version=?
- sudo_user: build-system-frontend
- sudo: yes
-- name: Setup lazr-js 1/3
- file: state=directory path=~/lazr-js/{{item}}
- with_items:
- - build/3.3.0
- - build/gallery
- sudo_user: build-system-frontend
- sudo: yes
-- name: Setup lazr-js 2/3
- file: state=link src=~/yui/build/ dest=~/lazr-js/build/3.3.0/build
- sudo_user: build-system-frontend
- sudo: yes
-- name: Setup lazr-js 3/3
- file: state=directory path=~/lazr-js/build/gallery/{{item}}
- with_items:
- - gallery-overlay-extras
- - gallery-outside-events
- - gallery-base64
- sudo_user: build-system-frontend
- sudo: yes
-
-- name: Fetch YUI gallery components
- get_url: url=http://yui.yahooapis.com/combo?gallery-2010.12.16-18-24/build/{{item}}/{{item}}-min.js
-           dest=~/lazr-js/build/gallery/{{item}}/{{item}}-min.js
- with_items:
- - gallery-overlay-extras
- - gallery-outside-events
- - gallery-base64
- sudo_user: build-system-frontend
- sudo: yes
diff --git a/ansible/roles/frontend/templates/settings_prod.py b/ansible/roles/frontend/templates/settings_prod.py
deleted file mode 100644
index 7cfa7c8..0000000
--- a/ansible/roles/frontend/templates/settings_prod.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from settings import *
-
-MEDIA_URL = '/static/'
-USE_OWN_COMBO = True
-
-DATABASES['default']['NAME'] = '/var/lib/linaro-abs-frontend/session.db'
-
-FRONTEND_JENKINS_USER = 'linaro-android-build-frontend@linaro.org'
-FRONTEND_JENKINS_PASSWORD = open('/var/lib/linaro-abs-frontend/jenkins-password').read().strip()
-
-{% if frontend_auth == "openid" %}
-LOGIN_URL = '/openid/login/'
-{% elif frontend_auth == "crowd" %}
-AUTH_CROWD_APPLICATION_USER = '{{crowd_user}}'
-AUTH_CROWD_APPLICATION_PASSWORD = '{{crowd_passwd}}'
-{% endif %}
-
-SECRET_KEY = "{{ lookup('password', cred_store + '/frontend/django_secret_key') }}"
diff --git a/ansible/roles/frontend/vars/main.yml b/ansible/roles/frontend/vars/main.yml
deleted file mode 100644
index e927a04..0000000
--- a/ansible/roles/frontend/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-# "crowd" or "openid"
-frontend_auth: crowd
diff --git a/ansible/roles/jenkins-config-git/tasks/main.yml b/ansible/roles/jenkins-config-git/tasks/main.yml
deleted file mode 100644
index bce4ba7..0000000
--- a/ansible/roles/jenkins-config-git/tasks/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-- debug: var=jenkins_config_from_backup
- tags:
- - git
-
-- name: Ensure Jenkins config dir exists
- file: path=/var/lib/jenkins owner=jenkins state=directory
- sudo: yes
- tags:
- - git
-
-- name: Check if Jenkins config already checked out
- stat: path=/var/lib/jenkins/.git
- register: checkout_exists
- tags:
- - git
-
-- name: Checkout production Jenkins config backup (if not yet)
- # dest must end with "jenkins"
- git: name=ssh://paul.sokolovsky@linaro-private.git.linaro.org/srv/linaro-private.git.linaro.org/linaro-infrastructure/jenkins-config-{{site_name}}.git
-       dest=/home/ubuntu/jenkins
-       accept_hostkey=true
- when: checkout_exists.stat.exists == False
- tags:
- - git
-
-- name: Copy config checkout to final destination (if not yet)
- shell: cp -a /home/ubuntu/jenkins /var/lib/
- sudo: true
- when: checkout_exists.stat.exists == False
- tags:
- - git
-
-- name: Ensure userContent redirect dir exists
- file: dest=/var/lib/jenkins/jobs/_extra/userContent state=directory mode=0775
- sudo: true
- tags:
- - git
-
-- name: Set correct permissions on config checkout
- file: path=/var/lib/jenkins owner=jenkins group=ubuntu state=directory recurse=yes
- sudo: true
- tags:
- - git
- #mode=0775
-
-- name: Set correct permissions for keys
- shell: chown .publish /var/lib/jenkins/.ssh/linaro-android-build-publish*; chmod 440 /var/lib/jenkins/.ssh/linaro-android-build-publish*
- sudo: true
- when: hosttype == "android-build"
- tags:
- - git
diff --git a/ansible/roles/jenkins-config-skeleton/files/jenkins-config/jobs/blank/config.xml b/ansible/roles/jenkins-config-skeleton/files/jenkins-config/jobs/blank/config.xml
deleted file mode 100644
index 72bc44e..0000000
--- a/ansible/roles/jenkins-config-skeleton/files/jenkins-config/jobs/blank/config.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<project>
- <actions/>
- <description></description>
- <keepDependencies>false</keepDependencies>
- <properties>
- <hudson.model.ParametersDefinitionProperty>
- <parameterDefinitions>
- <hudson.model.StringParameterDefinition>
- <name>CONFIG</name>
- <description></description>
- <defaultValue></defaultValue>
- </hudson.model.StringParameterDefinition>
- </parameterDefinitions>
- </hudson.model.ParametersDefinitionProperty>
- </properties>
- <scm class="hudson.scm.NullSCM"/>
- <assignedNode>ec2</assignedNode>
- <canRoam>false</canRoam>
- <disabled>false</disabled>
- <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
- <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
- <triggers class="vector"/>
- <concurrentBuild>true</concurrentBuild>
- <builders>
- <hudson.tasks.Shell>
- <command>rm -rf build-tools
-bzr clone lp:linaro-android-build-tools build-tools
-build-tools/node/build &quot;$JENKINS_URL&quot; &quot;$CONFIG&quot;
-</command>
- </hudson.tasks.Shell>
- </builders>
- <publishers>
- <hudson.tasks.ArtifactArchiver>
- <artifacts>build/out/target/*/*/*.img,build/out/target/*/*/*.tar.bz2,build/out/*.tar.bz2,build/out/*.xml</artifacts>
- <latestOnly>false</latestOnly>
- </hudson.tasks.ArtifactArchiver>
- </publishers>
- <buildWrappers/>
-</project> \ No newline at end of file
diff --git a/ansible/roles/jenkins-config-skeleton/files/jenkins-config/users/admin/config.xml b/ansible/roles/jenkins-config-skeleton/files/jenkins-config/users/admin/config.xml
deleted file mode 100644
index 4747f35..0000000
--- a/ansible/roles/jenkins-config-skeleton/files/jenkins-config/users/admin/config.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<user>
- <fullName>Jenkins Admin</fullName>
- <properties>
- <hudson.model.MyViewsProperty>
- <primaryViewName>All</primaryViewName>
- <views>
- <hudson.model.AllView>
- <owner class="hudson.model.MyViewsProperty" reference="../../.."/>
- <name>All</name>
- <filterExecutors>false</filterExecutors>
- <filterQueue>false</filterQueue>
- <properties class="hudson.model.View$PropertyList"/>
- </hudson.model.AllView>
- </views>
- </hudson.model.MyViewsProperty>
- <hudson.security.HudsonPrivateSecurityRealm_-Details>
- <passwordHash>pmqdNw:4a3d61f112ade59bf1e53b2743de44ea68d165490a2b6936bd7ec8f314e43a86</passwordHash>
- </hudson.security.HudsonPrivateSecurityRealm_-Details>
- <hudson.tasks.Mailer_-UserProperty>
- <emailAddress>admin@address.org</emailAddress>
- </hudson.tasks.Mailer_-UserProperty>
- </properties>
-</user>
diff --git a/ansible/roles/jenkins-config-skeleton/handlers/main.yml b/ansible/roles/jenkins-config-skeleton/handlers/main.yml
deleted file mode 100644
index 183c2bb..0000000
--- a/ansible/roles/jenkins-config-skeleton/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: Restart Jenkins
- service: name=jenkins state=restarted
- sudo: yes
- # Apache proxy module sees that Jenkins is gone and then
- # goes into some timeout before reconnecting to it again.
- # Restart forces reconnect ASAP.
- notify: Restart Apache
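
The handler above restarts Jenkins and immediately notifies the Apache handler, so mod_proxy reconnects without sitting out its retry timeout. A minimal sketch of the same chained-handler pattern, in the old-style syntax used throughout this repo; the host group name and the Apache handler body are illustrative assumptions (the real Apache handler lives elsewhere in the repo):

    # chained-handlers-sketch.yml (illustrative)
    - hosts: jenkins-masters        # hypothetical group name
      handlers:
        - name: Restart Jenkins
          service: name=jenkins state=restarted
          sudo: yes
          # A handler may notify another handler; Apache is bounced in the same run
          # so its proxy worker reconnects to Jenkins right away.
          notify: Restart Apache
        - name: Restart Apache
          service: name=apache2 state=restarted
          sudo: yes
      tasks:
        - name: Touch the Jenkins main config (stands in for any change that should trigger a restart)
          file: path=/var/lib/jenkins/config.xml state=touch
          sudo: yes
          notify: Restart Jenkins
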
diff --git a/ansible/roles/jenkins-config-skeleton/tasks/main.yml b/ansible/roles/jenkins-config-skeleton/tasks/main.yml
deleted file mode 100644
index 04d67ed..0000000
--- a/ansible/roles/jenkins-config-skeleton/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-- name: Verify Jenkins parameters
- fail: msg="crowd_user and crowd_passwd should be defined in {{private_vars}}"
- when: jenkins_auth == "crowd" and (crowd_user is not defined or crowd_passwd is not defined)
-
-#- name: Set up minimal Jenkins configuration skeleton
-# # This requires recursive copy patch
-# copy: backup=yes src=jenkins-config/ dest=/var/lib/jenkins/ owner=jenkins
-# sudo: yes
-# notify:
-# - Restart Jenkins
-
-- name: Set up Jenkins job root dir
- file: state=directory dest=/var/lib/jenkins/jobs owner=jenkins mode=0755
- sudo: yes
-
-- name: Set up minimal Jenkins configuration skeleton 1
- file: state=directory dest=/var/lib/jenkins/{{item}} owner=jenkins
- sudo: yes
- with_items:
- - users/admin/
- - jobs/blank/
-
-- name: Set up minimal Jenkins configuration skeleton 2
- copy: backup=yes src=jenkins-config/{{item}} dest=/var/lib/jenkins/{{item}} owner=jenkins
- sudo: yes
- with_items:
- - users/admin/config.xml
- - jobs/blank/config.xml
- notify:
- - Restart Jenkins
-
-- when: jenkins_managed_config
- name: Create managed Jenkins config file
- template: backup=yes src=jenkins-config/config.xml dest=/var/lib/jenkins/
- owner=jenkins mode=0644
- sudo: yes
- notify:
- - Restart Jenkins
-
-- when: not jenkins_managed_config
- name: Create Ansible-speced Jenkins config file as config.xml.ansible
- template: backup=yes src=jenkins-config/config.xml dest=/var/lib/jenkins/config.xml.ansible
- owner=jenkins mode=0644
- sudo: yes
- notify:
- - Restart Jenkins
-- when: not jenkins_managed_config
- name: Copy config.xml.ansible to config.xml if configuring first time (fail means config.xml is not updated)
- shell: "cd /var/lib/jenkins; [ ! -f config.xml ] && cp config.xml.ansible config.xml"
- ignore_errors: yes
- sudo: yes
- sudo_user: jenkins
-
-- name: Create Jenkins external address config
- template: src=jenkins-config/hudson.tasks.Mailer.xml
- dest=/var/lib/jenkins/hudson.tasks.Mailer.xml owner=jenkins mode=0644 backup=yes
- sudo: yes
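
The first-time copy above leans on a shell test plus ignore_errors, so later runs report an expected failure. A hedged alternative sketch that keeps the same behaviour but stays green, using the command module's creates= guard (a substitute technique, not what this role did):

    - name: Copy config.xml.ansible to config.xml only when config.xml does not exist yet
      # creates= turns the task into a no-op ("ok") once the target file is present,
      # so no ignore_errors is needed and the run summary stays clean.
      command: cp config.xml.ansible config.xml
               chdir=/var/lib/jenkins creates=/var/lib/jenkins/config.xml
      sudo: yes
      sudo_user: jenkins
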
diff --git a/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/config.xml b/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/config.xml
deleted file mode 100644
index 27e451a..0000000
--- a/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/config.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<hudson>
- <disabledAdministrativeMonitors/>
- <version>1.509.4</version>
- <numExecutors>{{jenkins_master_executors}}</numExecutors>
- <mode>NORMAL</mode>
- <useSecurity>true</useSecurity>
- <authorizationStrategy class="hudson.security.ProjectMatrixAuthorizationStrategy">
- <permission>hudson.model.Hudson.Administer:admin</permission>
- <permission>hudson.model.Hudson.Administer:jenkins-admins</permission>
- <permission>hudson.model.Hudson.Read:authenticated</permission>
-{% if not jenkins_private %}
- <permission>hudson.model.Hudson.Read:anonymous</permission>
- <permission>hudson.model.Item.Read:anonymous</permission>
-{% endif %}
-{% include jenkins_extra_users %}
-
- </authorizationStrategy>
-{% if jenkins_auth == "native" %}
- <securityRealm class="hudson.security.HudsonPrivateSecurityRealm">
- <disableSignup>false</disableSignup>
- </securityRealm>
-{% elif jenkins_auth == "crowd" %}
- <securityRealm class="de.theit.jenkins.crowd.CrowdSecurityRealm" plugin="crowd2@1.6">
- <url>https://login.linaro.org:8443/crowd</url>
- <applicationName>{{crowd_user}}</applicationName>
- <password>{{crowd_passwd}}</password>
- <group>linaro-login-users</group>
- <nestedGroups>true</nestedGroups>
- <useSSO>false</useSSO>
- <sessionValidationInterval>10</sessionValidationInterval>
- </securityRealm>
-{% else %}
- {{ fail("Unknown jenkins_auth value: " + jenkins_auth) }}
-{% endif %}
- <projectNamingStrategy class="jenkins.model.ProjectNamingStrategy$DefaultProjectNamingStrategy"/>
- <workspaceDir>${ITEM_ROOTDIR}/workspace</workspaceDir>
- <buildsDir>${ITEM_ROOTDIR}/builds</buildsDir>
- <markupFormatter class="hudson.markup.RawHtmlMarkupFormatter">
- <disableSyntaxHighlighting>false</disableSyntaxHighlighting>
- </markupFormatter>
- <jdks/>
- <viewsTabBar class="hudson.views.DefaultViewsTabBar"/>
- <myViewsTabBar class="hudson.views.DefaultMyViewsTabBar"/>
- <clouds>
- <hudson.plugins.ec2.EC2Cloud>
- <name>ec2-US_EAST_1</name>
- <accessId></accessId>
- <secretKey>NSdfT2gW7whnbhkIpFcg4Q==</secretKey>
- <privateKey>
- <privateKey>NSdfT2gW7whnbhkIpFcg4Q==</privateKey>
- </privateKey>
- <instanceCap>10</instanceCap>
- <templates>
- <hudson.plugins.ec2.SlaveTemplate>
- <ami>ami-68ad5201</ami>
- <description>Natty Release 64bit Instance Store</description>
- <zone></zone>
- <securityGroups></securityGroups>
- <remoteFS>/mnt/jenkins</remoteFS>
- <sshPort>22</sshPort>
- <type>M1Xlarge</type>
- <labels>ec2 natty 64bit</labels>
- <mode>NORMAL</mode>
- <initScript>{
-apt-get update
-apt-get install -y bzr
-bzr clone lp:linaro-android-build-tools /tmp/build-tools
-time /tmp/build-tools/node/root-setup-android-build-node
-} 2&gt;&amp;1 | tee /tmp/instance-log.txt
-</initScript>
- <userData></userData>
- <numExecutors>1</numExecutors>
- <remoteAdmin>ubuntu</remoteAdmin>
- <rootCommandPrefix>sudo</rootCommandPrefix>
- <jvmopts></jvmopts>
- <subnetId></subnetId>
- <idleTerminationMinutes>30</idleTerminationMinutes>
- <instanceCap>0</instanceCap>
- <stopOnTerminate>false</stopOnTerminate>
- <usePrivateDnsName>false</usePrivateDnsName>
- </hudson.plugins.ec2.SlaveTemplate>
- </templates>
- <region>US_EAST_1</region>
- </hudson.plugins.ec2.EC2Cloud>
- </clouds>
- <slaves>
-{% if jenkins_slaves_file is defined %}
-{% include jenkins_slaves_file %}
-
-{% endif %}
- </slaves>
- <quietPeriod>5</quietPeriod>
- <scmCheckoutRetryCount>0</scmCheckoutRetryCount>
- <views>
- <hudson.model.AllView>
- <owner class="hudson" reference="../../.."/>
- <name>All</name>
- <filterExecutors>false</filterExecutors>
- <filterQueue>false</filterQueue>
- <properties class="hudson.model.View$PropertyList"/>
- </hudson.model.AllView>
- </views>
- <primaryView>All</primaryView>
- <slaveAgentPort>-1</slaveAgentPort>
- <label></label>
- <nodeProperties/>
- <globalNodeProperties/>
-</hudson> \ No newline at end of file
diff --git a/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/hudson.tasks.Mailer.xml b/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/hudson.tasks.Mailer.xml
deleted file mode 100644
index ad09490..0000000
--- a/ansible/roles/jenkins-config-skeleton/templates/jenkins-config/hudson.tasks.Mailer.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<hudson.tasks.Mailer_-DescriptorImpl>
- <defaultSuffix>@linaro.org</defaultSuffix>
- <hudsonUrl>{{jenkins_url}}</hudsonUrl>
- <smtpAuthUsername>ci_notify@linaro.org</smtpAuthUsername>
- <smtpAuthPassword>{{smtpAuthPassword}}</smtpAuthPassword>
- <adminAddress>address not configured yet &lt;nobody@nowhere&gt;</adminAddress>
-{% if jenkins_reply_to is defined %}
- <replyToAddress>{{jenkins_reply_to}}</replyToAddress>
-{% endif %}
- <smtpHost>smtp.gmail.com</smtpHost>
- <useSsl>true</useSsl>
- <smtpPort>465</smtpPort>
- <charset>UTF-8</charset>
-</hudson.tasks.Mailer_-DescriptorImpl>
diff --git a/ansible/roles/jenkins-cronjobs/tasks/main.yml b/ansible/roles/jenkins-cronjobs/tasks/main.yml
deleted file mode 100644
index a093cd7..0000000
--- a/ansible/roles/jenkins-cronjobs/tasks/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Make sure Jenkins is (re)started
-- meta: flush_handlers
-
-- wait_for: port={{jenkins_port}} timeout=60
-
-# Container port may be opened, but app running there may not yet be ready
-- wait_for: port={{jenkins_port}} delay=10 timeout=60
-
-- name: Download jenkins-cli, required by some cronjobs
- get_url: url=http://localhost:{{jenkins_port}}{{jenkins_prefix}}/jnlpJars/jenkins-cli.jar
- dest=/var/lib/jenkins
- register: result
- ignore_errors: yes
- until: result | success
- retries: 10
- delay: 7
- sudo: yes
- tags:
- - cronjob
-
-- name: Set up cronjob to expire old builds of inactive jobs
- cron: name="Expire old builds"
- job="java -jar /var/lib/jenkins/jenkins-cli.jar -s http://localhost:{{jenkins_port}}{{jenkins_prefix}}/ -i /var/lib/jenkins/.ssh/jenkinscli groovy linaro-jenkins-tools/diskspace/expire-builds.groovy"
- minute=0 hour=19
- cron_file=linaro-jenkins user=jenkins
- sudo: yes
- tags:
- - cronjob
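
The two wait_for calls and the retried get_url above bridge the gap between "the port is open" and "Jenkins actually answers requests". The same readiness probe can be sketched with the uri module, polling the jenkins-cli.jar URL until it is served; this is an alternative illustration, not the deleted role's code, and jenkins_port/jenkins_prefix come from the inventory as above:

    - name: Wait until Jenkins serves HTTP, not merely listens on its port
      uri: url=http://localhost:{{jenkins_port}}{{jenkins_prefix}}/jnlpJars/jenkins-cli.jar
           method=HEAD
      register: jenkins_ready
      ignore_errors: yes
      until: jenkins_ready | success
      retries: 12
      delay: 5
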
diff --git a/ansible/roles/jenkins-linaro-theme/tasks/main.yml b/ansible/roles/jenkins-linaro-theme/tasks/main.yml
deleted file mode 100644
index 90b6493..0000000
--- a/ansible/roles/jenkins-linaro-theme/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-- name: Checkout jenkins-linaro-theme
- git: name=http://git.linaro.org/git/infrastructure/jenkins-linaro-theme.git
- dest=/var/lib/jenkins/userContent/theme
- sudo: yes
- tags:
- - git
- - jenkins-install
-
-- name: Enable jenkins-linaro-theme
- template: src=org.codefirst.SimpleThemeDecorator.xml dest=/var/lib/jenkins/org.codefirst.SimpleThemeDecorator.xml mode=0644
- sudo: yes
- notify:
- - Restart Jenkins
- sudo: yes
- tags:
- - jenkins-install
diff --git a/ansible/roles/jenkins-linaro-theme/templates/org.codefirst.SimpleThemeDecorator.xml b/ansible/roles/jenkins-linaro-theme/templates/org.codefirst.SimpleThemeDecorator.xml
deleted file mode 100644
index c5fbd37..0000000
--- a/ansible/roles/jenkins-linaro-theme/templates/org.codefirst.SimpleThemeDecorator.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<org.codefirst.SimpleThemeDecorator plugin="simple-theme-plugin@0.3">
- <cssUrl>/userContent/theme/linaro/css/linaro.css</cssUrl>
- <jsUrl></jsUrl>
-</org.codefirst.SimpleThemeDecorator>
diff --git a/ansible/roles/jenkins-ssh-publish/tasks/main.yml b/ansible/roles/jenkins-ssh-publish/tasks/main.yml
deleted file mode 100644
index dcabfea..0000000
--- a/ansible/roles/jenkins-ssh-publish/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-- name: Set up Jenkins Publish-over-SSH config file
- template: backup=yes src=jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml
- dest=/var/lib/jenkins/ owner=jenkins backup=yes
- sudo: yes
- notify:
- - Restart Jenkins
- tags:
- - template
-- name: Create keys dir
- file: state=directory path=/home/ubuntu/snapshots-sync
- sudo: yes
-- name: Copy keys
- copy: src={{secrets_dir}}/jenkins-publish/{{item}} dest=/home/ubuntu/snapshots-sync/ owner=jenkins mode=0400
- backup=yes
- with_items:
- - linaro-ci-publish
- - linaro-ci-publish-trigger
- sudo: yes
diff --git a/ansible/roles/jenkins-ssh-publish/templates/jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml b/ansible/roles/jenkins-ssh-publish/templates/jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml
deleted file mode 100644
index 4a028b0..0000000
--- a/ansible/roles/jenkins-ssh-publish/templates/jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin_-Descriptor plugin="publish-over-ssh@1.10">
- <hostConfigurations>
- <jenkins.plugins.publish__over__ssh.BapSshHostConfiguration>
- <name>snapshots.linaro.org</name>
- <hostname>snapshots.linaro.org</hostname>
- <username>linaro-ci-publish</username>
- <secretPassword></secretPassword>
- <remoteRootDir>uploads</remoteRootDir>
- <port>22</port>
- <commonConfig class="jenkins.plugins.publish_over_ssh.BapSshCommonConfiguration">
- <secretPassphrase></secretPassphrase>
- <key></key>
- <keyPath></keyPath>
- <disableAllExec>false</disableAllExec>
- </commonConfig>
- <timeout>300000</timeout>
- <overrideKey>true</overrideKey>
- <disableExec>false</disableExec>
- <keyInfo>
- <secretPassphrase></secretPassphrase>
- <key></key>
- <keyPath>/home/ubuntu/snapshots-sync/linaro-ci-publish</keyPath>
- </keyInfo>
- </jenkins.plugins.publish__over__ssh.BapSshHostConfiguration>
- <jenkins.plugins.publish__over__ssh.BapSshHostConfiguration>
- <name>snapshots.linaro.org file-move</name>
- <hostname>snapshots.linaro.org</hostname>
- <username>linaro-ci-publish-trigger</username>
- <secretPassword></secretPassword>
- <remoteRootDir></remoteRootDir>
- <port>22</port>
- <commonConfig class="jenkins.plugins.publish_over_ssh.BapSshCommonConfiguration" reference="../../jenkins.plugins.publish__over__ssh.BapSshHostConfiguration/commonConfig"/>
- <timeout>300000</timeout>
- <overrideKey>true</overrideKey>
- <disableExec>false</disableExec>
- <keyInfo>
- <secretPassphrase></secretPassphrase>
- <key></key>
- <keyPath>/home/ubuntu/snapshots-sync/linaro-ci-publish-trigger</keyPath>
- </keyInfo>
- </jenkins.plugins.publish__over__ssh.BapSshHostConfiguration>
-{% if jenkins_ssh_publishers_file is defined %}
-{% include jenkins_ssh_publishers_file %}
-
-{% endif %}
- </hostConfigurations>
- <commonConfig reference="../hostConfigurations/jenkins.plugins.publish__over__ssh.BapSshHostConfiguration/commonConfig"/>
- <defaults class="jenkins.plugins.publish_over_ssh.options.SshOverrideDefaults">
- <overrideInstanceConfig>
- <continueOnError>false</continueOnError>
- <failOnError>false</failOnError>
- <alwaysPublishFromMaster>true</alwaysPublishFromMaster>
- </overrideInstanceConfig>
- <overrideParamPublish>
- <parameterName></parameterName>
- </overrideParamPublish>
- <overridePublisher>
- <configName></configName>
- <useWorkspaceInPromotion>false</useWorkspaceInPromotion>
- <usePromotionTimestamp>false</usePromotionTimestamp>
- <verbose>false</verbose>
- </overridePublisher>
- <overridePublisherLabel>
- <label></label>
- </overridePublisherLabel>
- <overrideRetry>
- <retries>0</retries>
- <retryDelay>10000</retryDelay>
- </overrideRetry>
- <overrideTransfer>
- <execCommand></execCommand>
- <execTimeout>120000</execTimeout>
- <sourceFiles></sourceFiles>
- <removePrefix></removePrefix>
- <remoteDirectory></remoteDirectory>
- <excludes></excludes>
- <remoteDirectorySDF>false</remoteDirectorySDF>
- <flatten>false</flatten>
- <cleanRemote>false</cleanRemote>
- <usePty>false</usePty>
- <noDefaultExcludes>false</noDefaultExcludes>
- <makeEmptyDirs>false</makeEmptyDirs>
- <patternSeparator>[, ]+</patternSeparator>
- </overrideTransfer>
- </defaults>
-</jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin_-Descriptor>
diff --git a/ansible/roles/jenkins-user/tasks/main.yml b/ansible/roles/jenkins-user/tasks/main.yml
deleted file mode 100644
index 13abbfc..0000000
--- a/ansible/roles/jenkins-user/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-- name: Create Jenkins "frontend" user dir
- file: state=directory dest=/var/lib/jenkins/users/frontend owner=jenkins
- sudo: yes
-
-- name: Create Jenkins "frontend" user config
- template: src=jenkins-config/users/frontend/config.xml
- dest=/var/lib/jenkins/users/frontend/config.xml owner=jenkins
- sudo: yes
diff --git a/ansible/roles/jenkins-user/tasks/templates/jenkins-config/users/frontend/config.xml b/ansible/roles/jenkins-user/tasks/templates/jenkins-config/users/frontend/config.xml
deleted file mode 100644
index 4ca3b9f..0000000
--- a/ansible/roles/jenkins-user/tasks/templates/jenkins-config/users/frontend/config.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<user>
- <fullName>Linaro Cloud Buildd Frontend</fullName>
- <properties>
- <hudson.model.MyViewsProperty>
- <primaryViewName>All</primaryViewName>
- <views>
- <hudson.model.AllView>
- <owner class="hudson.model.MyViewsProperty" reference="../../.."/>
- <name>All</name>
- <filterExecutors>false</filterExecutors>
- <filterQueue>false</filterQueue>
- </hudson.model.AllView>
- </views>
- </hudson.model.MyViewsProperty>
- <hudson.security.HudsonPrivateSecurityRealm_-Details>
- <passwordHash>{{ lookup('password', cred_store + '/jenkins/frontend') | jenkins_hash }}</passwordHash>
- </hudson.security.HudsonPrivateSecurityRealm_-Details>
- <hudson.tasks.Mailer_-UserProperty>
- <emailAddress>frontend@address.org</emailAddress>
- </hudson.tasks.Mailer_-UserProperty>
- </properties>
-</user> \ No newline at end of file
diff --git a/ansible/roles/jenkins-utils-cron-disable/tasks/main.yml b/ansible/roles/jenkins-utils-cron-disable/tasks/main.yml
deleted file mode 100644
index 2acc855..0000000
--- a/ansible/roles/jenkins-utils-cron-disable/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- name: Disable any jobs triggered based on time schedule ("cron jobs")
- command: ./mangle-jobs cron-disable.mangle --dir=/var/lib/jenkins --backup --really
- chdir=linaro-jenkins-tools/mangle-jobs/
- when: "not production and jenkins_config_from_backup"
- tags:
- - mangle-jobs
diff --git a/ansible/roles/jenkins-utils-publish-staging/tasks/main.yml b/ansible/roles/jenkins-utils-publish-staging/tasks/main.yml
deleted file mode 100644
index da6024d..0000000
--- a/ansible/roles/jenkins-utils-publish-staging/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-- name: Reconfigure SSH Publish Plugin to use staging.snapshots.linaro.org
- command: ./mangle-jobs ssh-publish-plugin-set-staging.mangle
- --file=/var/lib/jenkins/jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml
- --backup --really
- chdir=linaro-jenkins-tools/mangle-jobs/
- when: "not production and jenkins_config_from_backup"
- tags:
- - mangle-jobs
diff --git a/ansible/roles/jenkins-utils-set-jenkins-url/tasks/main.yml b/ansible/roles/jenkins-utils-set-jenkins-url/tasks/main.yml
deleted file mode 100644
index de7352f..0000000
--- a/ansible/roles/jenkins-utils-set-jenkins-url/tasks/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-- name: Set Jenkins URL
- shell: JENKINS_URL=https://{{ansible_ssh_host}}{{jenkins_prefix}} ./mangle-jobs jenkins-url-set.mangle
- --file=/var/lib/jenkins/jenkins.model.JenkinsLocationConfiguration.xml
- --backup --really
- chdir=linaro-jenkins-tools/mangle-jobs/
- sudo: yes
- sudo_user: jenkins
- when: "not production and jenkins_config_from_backup"
- notify:
- - Restart Jenkins
- tags:
- - mangle-jobs
diff --git a/ansible/roles/jenkins/handlers/main.yml b/ansible/roles/jenkins/handlers/main.yml
deleted file mode 100644
index 183c2bb..0000000
--- a/ansible/roles/jenkins/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: Restart Jenkins
- service: name=jenkins state=restarted
- sudo: yes
- # Apache proxy module sees that Jenkins is gone and then
- # goes into some timeout before reconnecting to it again.
- # Restart forces reconnect ASAP.
- notify: Restart Apache
diff --git a/ansible/roles/jenkins/tasks/jenkins-pkgs.yml b/ansible/roles/jenkins/tasks/jenkins-pkgs.yml
deleted file mode 100644
index 97beefb..0000000
--- a/ansible/roles/jenkins/tasks/jenkins-pkgs.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-- name: Install Jenkins dependency packages
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - default-jre
- - daemon
- tags:
- - pkg
- - jenkins-install
-
-- name: Download Jenkins {{jenkins_version}} LTS package
- get_url: url=http://pkg.jenkins-ci.org/debian-stable/binary/jenkins_{{jenkins_version}}_all.deb
- dest=/tmp/jenkins_{{jenkins_version}}_all.deb
- tags:
- - pkg
- - jenkins-install
-
-- name: Install Jenkins
- shell: RUNLEVEL=1 dpkg -i --skip-same-version /tmp/jenkins_{{jenkins_version}}_all.deb
- sudo: yes
- register: dpkg_result
- changed_when: "'already installed' not in dpkg_result.stderr"
- notify:
- - Restart Jenkins
- tags:
- - pkg
- - jenkins-install
-
-# System-level Jenkins configuration
-- name: Configure Jenkins port
- lineinfile: regexp="^HTTP_PORT=" line="HTTP_PORT={{jenkins_port}}" dest=/etc/default/jenkins backup=yes
- sudo: yes
- when: jenkins_port != 8080
-
-- name: Configure Jenkins URL prefix
- # Note: Apache and Jenkins prefixes must match! (mod_proxy is not smart enough
- # to rewrite links in HTML, and mod_proxy_html is a chore to run.)
-
- # Add --prefix= arg to existing args if not there yet
- lineinfile: regexp='^JENKINS_ARGS=(?!.*--prefix=/jenkins)"?(.+?)"?$'
- line='JENKINS_ARGS="\1 --prefix={{jenkins_prefix}}"'
- dest=/etc/default/jenkins backrefs=yes
- sudo: yes
- when: jenkins_prefix != "/"
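
To make the backrefs edit above concrete: with jenkins_prefix set to /jenkins it rewrites the JENKINS_ARGS line in /etc/default/jenkins as shown below (the pre-existing argument values are illustrative, not taken from this repo), and the (?!.*--prefix=/jenkins) lookahead keeps the task idempotent once the prefix is present:

    # before (illustrative contents):
    JENKINS_ARGS="--webroot=/var/cache/jenkins/war --httpPort=$HTTP_PORT"
    # after one run of the lineinfile task above:
    JENKINS_ARGS="--webroot=/var/cache/jenkins/war --httpPort=$HTTP_PORT --prefix=/jenkins"
    # a second run finds --prefix=/jenkins already there and changes nothing
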
diff --git a/ansible/roles/jenkins/tasks/jenkins-plugins.yml b/ansible/roles/jenkins/tasks/jenkins-plugins.yml
deleted file mode 100644
index 384d31b..0000000
--- a/ansible/roles/jenkins/tasks/jenkins-plugins.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-- name: Create Jenkins plugin dir
- file: state=directory path=~jenkins/plugins/ owner=jenkins
- sudo: yes
- tags:
- - jenkins-install
-
-- name: Download Jenkins plugins which override builtin ones
- get_url: url={{item.url}} dest=~jenkins/plugins/{{item.file}} force=yes owner=jenkins
- sudo: yes
- with_items: jenkins_override_plugins
- notify:
- - Restart Jenkins
- tags:
- - jenkins-install
-
-- name: Pin Jenkins plugins which override builtin ones
- copy: content="" dest=~jenkins/plugins/{{item.file}}.pinned owner=jenkins mode=0644
- sudo: yes
- with_items: jenkins_override_plugins
- notify:
- - Restart Jenkins
- tags:
- - jenkins-install
-
-- name: Download "normal" Jenkins plugins
- get_url: url={{item}} dest=~jenkins/plugins/ owner=jenkins
- sudo: yes
- with_items: jenkins_plugins
- notify:
- - Restart Jenkins
- tags:
- - jenkins-install
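
The empty *.pinned files written above use Jenkins' pinned-plugins mechanism (see the wiki link in this role's vars): a marker next to a bundled plugin stops Jenkins from replacing the manually upgraded copy with the version baked into the .war at restart. Taking the mailer entry from the vars file as an example, and assuming the usual Debian home of /var/lib/jenkins for the jenkins user, the result looks like:

    /var/lib/jenkins/plugins/mailer.jpi           # mailer 1.11 downloaded from updates.jenkins-ci.org
    /var/lib/jenkins/plugins/mailer.jpi.pinned    # empty marker created by the "Pin ..." task above
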
diff --git a/ansible/roles/jenkins/tasks/main.yml b/ansible/roles/jenkins/tasks/main.yml
deleted file mode 100644
index dfb4b8a..0000000
--- a/ansible/roles/jenkins/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-- include: jenkins-pkgs.yml
-- include: jenkins-plugins.yml
diff --git a/ansible/roles/jenkins/vars/main.yml b/ansible/roles/jenkins/vars/main.yml
deleted file mode 100644
index 12daa97..0000000
--- a/ansible/roles/jenkins/vars/main.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-jenkins_port: 8080
-# These are upgrades of plugins that ship inside the Jenkins .war. Shipping them that way is already
-# unwise of Jenkins; worse, it forcibly overwrites an upgraded copy on restart unless a .pinned file exists.
-# https://wiki.jenkins-ci.org/display/JENKINS/Pinned+Plugins
-jenkins_override_plugins:
- - {url: "http://updates.jenkins-ci.org/download/plugins/antisamy-markup-formatter/1.2/antisamy-markup-formatter.hpi", file: "antisamy-markup-formatter.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/credentials/1.16.1/credentials.hpi", file: "credentials.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/cvs/2.12/cvs.hpi", file: "cvs.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/ldap/1.10.2/ldap.hpi", file: "ldap.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/mailer/1.11/mailer.hpi", file: "mailer.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/matrix-auth/1.2/matrix-auth.hpi", file: "matrix-auth.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/maven-plugin/2.6/maven-plugin.hpi", file: "maven-plugin.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/ssh-credentials/1.9/ssh-credentials.hpi", file: "ssh-credentials.jpi" }
- - {url: "http://updates.jenkins-ci.org/download/plugins/ssh-slaves/1.7/ssh-slaves.hpi", file: "ssh-slaves.jpi" }
- - {url: "http://updates.jenkins-ci.org/download/plugins/subversion/2.4.3/subversion.hpi", file: "subversion.jpi"}
- - {url: "http://updates.jenkins-ci.org/download/plugins/translation/1.11/translation.hpi", file: "translation.jpi"}
-jenkins_plugins:
- - http://people.linaro.org/~paul.sokolovsky/jenkins/shell-status-20120125.hpi
- - http://updates.jenkins-ci.org/download/plugins/analysis-core/1.60/analysis-core.hpi
- - http://updates.jenkins-ci.org/download/plugins/ant/1.2/ant.hpi
- - http://updates.jenkins-ci.org/download/plugins/bazaar/1.22/bazaar.hpi
- - http://updates.jenkins-ci.org/download/plugins/build-name-setter/1.3/build-name-setter.hpi
- - http://updates.jenkins-ci.org/download/plugins/build-timeout/1.14/build-timeout.hpi
- - http://updates.jenkins-ci.org/download/plugins/claim/2.3/claim.hpi
- - http://updates.jenkins-ci.org/download/plugins/conditional-buildstep/1.3.3/conditional-buildstep.hpi
- - http://updates.jenkins-ci.org/download/plugins/copy-to-slave/1.4.3/copy-to-slave.hpi
- - http://updates.jenkins-ci.org/download/plugins/copyartifact/1.31/copyartifact.hpi
- - http://updates.jenkins-ci.org/download/plugins/crowd2/1.6/crowd2.hpi
- - http://updates.jenkins-ci.org/download/plugins/docker-plugin/0.7/docker-plugin.hpi
- - http://updates.jenkins-ci.org/download/plugins/dropdown-viewstabbar-plugin/1.6/dropdown-viewstabbar-plugin.hpi
- - http://updates.jenkins-ci.org/download/plugins/dynamic-axis/1.0.2/dynamic-axis.hpi
- - http://updates.jenkins-ci.org/download/plugins/ec2/1.18/ec2.hpi
- - http://updates.jenkins-ci.org/download/plugins/email-ext/2.38.2/email-ext.hpi
- - http://updates.jenkins-ci.org/download/plugins/envinject/1.90/envinject.hpi
- - http://updates.jenkins-ci.org/download/plugins/external-monitor-job/1.2/external-monitor-job.hpi
- - http://updates.jenkins-ci.org/download/plugins/gerrit-trigger/2.11.1/gerrit-trigger.hpi
- - http://updates.jenkins-ci.org/download/plugins/git-client/1.10.2/git-client.hpi
- - http://updates.jenkins-ci.org/download/plugins/git/2.2.6/git.hpi
- - http://updates.jenkins-ci.org/download/plugins/greenballs/1.14/greenballs.hpi
- - http://updates.jenkins-ci.org/download/plugins/groovy-postbuild/1.10/groovy-postbuild.hpi
- - http://updates.jenkins-ci.org/download/plugins/htmlpublisher/1.3/htmlpublisher.hpi
- - http://updates.jenkins-ci.org/download/plugins/instant-messaging/1.30/instant-messaging.hpi
- - http://updates.jenkins-ci.org/download/plugins/ircbot/2.25/ircbot.hpi
- - http://updates.jenkins-ci.org/download/plugins/jenkins-multijob-plugin/1.13/jenkins-multijob-plugin.hpi
- - http://updates.jenkins-ci.org/download/plugins/jobConfigHistory/2.9/jobConfigHistory.hpi
- - http://updates.jenkins-ci.org/download/plugins/jquery/1.7.2-1/jquery.hpi
- - http://updates.jenkins-ci.org/download/plugins/log-parser/1.0.8/log-parser.hpi
- - http://updates.jenkins-ci.org/download/plugins/mapdb-api/1.0.1.0/mapdb-api.hpi
- - http://updates.jenkins-ci.org/download/plugins/matrix-project/1.3/matrix-project.hpi
- - http://updates.jenkins-ci.org/download/plugins/matrixtieparent/1.2/matrixtieparent.hpi
- - http://updates.jenkins-ci.org/download/plugins/nodelabelparameter/1.5.1/nodelabelparameter.hpi
- - http://updates.jenkins-ci.org/download/plugins/parameterized-trigger/2.25/parameterized-trigger.hpi
- - http://updates.jenkins-ci.org/download/plugins/publish-over-ssh/1.11/publish-over-ssh.hpi
- - http://updates.jenkins-ci.org/download/plugins/repo/1.6/repo.hpi
- - http://updates.jenkins-ci.org/download/plugins/run-condition/1.0/run-condition.hpi
- - http://updates.jenkins-ci.org/download/plugins/scm-api/0.2/scm-api.hpi
- - http://updates.jenkins-ci.org/download/plugins/simple-theme-plugin/0.3/simple-theme-plugin.hpi
- - http://updates.jenkins-ci.org/download/plugins/ssh-agent/1.5/ssh-agent.hpi
- - http://updates.jenkins-ci.org/download/plugins/throttle-concurrents/1.8.3/throttle-concurrents.hpi
- - http://updates.jenkins-ci.org/download/plugins/timestamper/1.5.14/timestamper.hpi
- - http://updates.jenkins-ci.org/download/plugins/token-macro/1.10/token-macro.hpi
- - http://updates.jenkins-ci.org/download/plugins/urltrigger/0.37/urltrigger.hpi
- - http://updates.jenkins-ci.org/download/plugins/warnings/4.42/warnings.hpi
diff --git a/ansible/roles/linaro-jenkins-tools/tasks/main.yml b/ansible/roles/linaro-jenkins-tools/tasks/main.yml
deleted file mode 100644
index 19376e3..0000000
--- a/ansible/roles/linaro-jenkins-tools/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-- name: Install dependent packages
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - python-lxml
-
-- name: Checkout linaro-jenkins-tools
- git: name=http://git.linaro.org/git/infrastructure/linaro-jenkins-tools.git
- dest=~/linaro-jenkins-tools
- tags:
- - git
- # Up to date checkout required for mangle-jobs
- - mangle-jobs
diff --git a/ansible/roles/new-publish/files/ssh_config_1 b/ansible/roles/new-publish/files/ssh_config_1
deleted file mode 100644
index 3b0a647..0000000
--- a/ansible/roles/new-publish/files/ssh_config_1
+++ /dev/null
@@ -1,10 +0,0 @@
-
-# Added by Ansible
-AuthorizedKeysFile /etc/ssh/user-authorized-keys/%u %h/.ssh/authorized_keys
-Subsystem sftp internal-sftp
-
-Match User publish-copy
- ChrootDirectory /mnt/publish
- ForceCommand internal-sftp
- AllowTcpForwarding no
- X11Forwarding no
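
The fragment above turns publish-copy into a chroot-jailed, SFTP-only account: ChrootDirectory confines it to /mnt/publish (which this role keeps root-owned, as sshd requires), ForceCommand internal-sftp replaces any requested command, and forwarding is disabled. A hedged sketch of client-side usage; the host name and key path are hypothetical, and uploads/ is the directory created by this role's tasks:

    # push an artifact into the chrooted upload area (illustrative)
    sftp -b - -i ~/.ssh/publish-copy publish-copy@publish.example.org <<'EOF'
    put build.tar.bz2 uploads/
    EOF
    # inside the session "uploads/" is really /mnt/publish/uploads on the server;
    # no shell, exec, or port forwarding is available to this account.
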
diff --git a/ansible/roles/new-publish/tasks/main.yml b/ansible/roles/new-publish/tasks/main.yml
deleted file mode 100644
index 86d514e..0000000
--- a/ansible/roles/new-publish/tasks/main.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-- name: Create top-level publishing dir
- # Home dir must be owned by root for ssh ChrootDirectory to work
- file: dest={{publish_home}} state=directory mode=0755 owner=root group=root
- sudo: yes
-
-- name: Create publish group
- group: name=publish state=present
- sudo: yes
-
-- name: Create publish-copy user
- user: name=publish-copy comment='Publishing - transfer user'
- group=publish home={{publish_home}}
-# generate_ssh_key=yes
-# ssh_key_file={{publish_keys_dir}}/publish-copy
- sudo: yes
-
-- name: Create publish-trigger user
- user: name=publish-trigger comment='Publishing - trigger user'
- group=publish home={{publish_home}}
-# generate_ssh_key=yes
-# ssh_key_file={{publish_keys_dir}}/publish-trigger
- sudo: yes
-
-- name: Create upload dir
- # Actual uploads will happen to this dir
- # publish-copy should have write access there, publish-trigger
- # generally only read (cleanup can be handled by cronjob)
- file: dest={{publish_home}}/uploads state=directory mode=0755 owner=publish-copy group=publish
- sudo: yes
-
-
-- name: Create /etc/ssh/user-authorized-keys/
- file: dest={{publish_root}}/etc/ssh/user-authorized-keys/ state=directory mode=0755 owner=root group=root
- sudo: yes
-
-- name: Setup publish-copy user SSH restrictions
- template: src=publish-copy.j2
- dest={{publish_root}}/etc/ssh/user-authorized-keys/publish-copy
- owner=root group=root mode=0644
- sudo: yes
-
-- name: Setup publish-trigger user SSH restrictions
- template: src=publish-trigger.j2
- dest={{publish_root}}/etc/ssh/user-authorized-keys/publish-trigger
- owner=root group=root mode=0644
- sudo: yes
-
-- name: Disable external sftp
- lineinfile: state=absent regexp="Subsystem sftp /usr/lib/openssh/sftp-server"
- dest=/etc/ssh/sshd_config
- sudo: yes
- notify:
- - Restart SSHD
-
-- name: Check if sshd_config already contains needed config
- command: grep "^AuthorizedKeysFile" /etc/ssh/sshd_config
- register: result
- ignore_errors: yes
- changed_when: False
-
-#regexp="^AuthorizedKeysFile"
-- name: sshd_config - Update AuthorizedKeysFile location
- lineinfile:
- line="{{ lookup('file', 'ssh_config_1') }}"
- dest=/etc/ssh/sshd_config
- backup=yes
- sudo: yes
- when: result is defined and result.rc != 0
-
-- name: Create .ssh dir
- file: dest={{publish_home}}/.ssh state=directory mode=0755
- sudo: yes
-
-- name: Create known_hosts
- shell: ssh-keyscan snapshots.linaro.org staging.snapshots.linaro.org >{{publish_home}}/.ssh/known_hosts
- sudo: yes
-
-# New m3.medium instance has rather modest instance store which is used
-# as upload area, so clean it often
-- name: Set up upload area cleanup cronjob
- cron: name="Clean up upload area"
- job="/home/ubuntu/linaro-jenkins-tools/new-publish/clean-uploads"
- minute="*/15"
- cron_file=new-publish user=publish-copy
- sudo: yes
- tags:
- - cronjob
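
The grep probe plus lineinfile pair above appends the ssh_config_1 fragment to sshd_config only once. A hedged alternative sketch with blockinfile, which manages its own marker comments and is idempotent without the separate grep (blockinfile arrived in later Ansible releases, so this is a substitute technique rather than what the deleted role used):

    - name: sshd_config - install publishing restrictions as a managed block
      blockinfile:
        dest: /etc/ssh/sshd_config
        marker: "# {mark} ANSIBLE MANAGED: publishing restrictions"
        block: "{{ lookup('file', 'ssh_config_1') }}"
        backup: yes
      become: yes
      notify:
        - Restart SSHD
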
diff --git a/ansible/roles/new-publish/templates/publish-copy.j2 b/ansible/roles/new-publish/templates/publish-copy.j2
deleted file mode 100644
index 7fb57ff..0000000
--- a/ansible/roles/new-publish/templates/publish-copy.j2
+++ /dev/null
@@ -1 +0,0 @@
-command="/usr/lib/sftp-server",no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCu9W2GpCZK7buK0i7XapOsvJ2UtAtWTEMzlLaoxp9wCmxBn+jkXt12bj3KwQPd+39Pft2PDQdIfpHGIU0mYWHUnv7KLZkZioZTEPwpios7rumXBc0DjvP+nkRhVU5pvC3Tjq4CKdUi61+zEFAxlMXmDndGz7WBllGP/hMStujMaXBGaeOf5V7ivWHBa2DO2zFoT+VdxROgyWz+F/ClyFH6tM9BGSp8G19g7pJiQxj2mqftg8yHn3jnyVqb2vCI+4urIgn9lgGdbAikwHr7Zwfa7jT3Sq7i/MJXvXyS9YWTd45AX8TFUxQeh+rLpLJhombmp9QYfRnHJItnNIaTrGBD root@android-build.linaro.org \ No newline at end of file
diff --git a/ansible/roles/new-publish/templates/publish-trigger.j2 b/ansible/roles/new-publish/templates/publish-trigger.j2
deleted file mode 100644
index d265062..0000000
--- a/ansible/roles/new-publish/templates/publish-trigger.j2
+++ /dev/null
@@ -1 +0,0 @@
-command="/home/ubuntu/linaro-jenkins-tools/new-publish/propagate.py ${SSH_ORIGINAL_COMMAND#* }",no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYwM5B28Jhvum/lUXDnNC587cRjira7i6A00P3/m/jmtu5r3Ng18WRAG6Pn3rUwQ9JuB3FGb6CyDld/MNdmpHiLtw4zoj5y5eIGE8hgeAGNKszq9tFreo0msR83zN5nPVSSP4dQ1NhQj3agWaqxy/twsfDFoK2u8RiiJ1wsqRzIYqy8A+UDX5+WFN5kMPjSOyut9UeBf17N7Fo35kRlzDFU/TpHpEGy6vmtU268QImKodm11BU9mDgx736+d22GXjpbkvoqP853xKI1OL/nl4k+jMeJ3tYW35nVZrlz9nu00/1bwwf0nAkNdLxhJ/5zicPUJkwP4ehsXG/Od7cWYQd root@android-build.linaro.org \ No newline at end of file
diff --git a/ansible/roles/new-publish/vars/main.yml b/ansible/roles/new-publish/vars/main.yml
deleted file mode 100644
index 2aa2e79..0000000
--- a/ansible/roles/new-publish/vars/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-publish_root: ''
-publish_home: "{{publish_root}}/mnt/publish"
-
-# not ideal
-publish_keys_dir: "{{publish_home}}"
diff --git a/ansible/roles/publishing/templates/publish-copy.j2 b/ansible/roles/publishing/templates/publish-copy.j2
deleted file mode 100644
index d840e5d..0000000
--- a/ansible/roles/publishing/templates/publish-copy.j2
+++ /dev/null
@@ -1 +0,0 @@
-command="/usr/lib/sftp-server",no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding {{ lookup('file', keys_dir + '/publish-copy.pub') }} \ No newline at end of file
diff --git a/ansible/roles/publishing/templates/publish-trigger.j2 b/ansible/roles/publishing/templates/publish-trigger.j2
deleted file mode 100644
index 37cc03a..0000000
--- a/ansible/roles/publishing/templates/publish-trigger.j2
+++ /dev/null
@@ -1 +0,0 @@
-command="/home/ubuntu/new-publish/utils/new-publish/trigger ${SSH_ORIGINAL_COMMAND#* }",no-pty,no-port-forwarding,no-X11-forwarding,no-agent-forwarding {{ lookup('file', keys_dir + '/publish-trigger.pub') }} \ No newline at end of file
diff --git a/ansible/roles/reposeed/tasks/main.yml b/ansible/roles/reposeed/tasks/main.yml
deleted file mode 100644
index ecfd728..0000000
--- a/ansible/roles/reposeed/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-#- name: Create seed directory
-# file: state=directory path=/mnt2/seed
-# sudo: yes
-
-- name: Create seed symlink
- file: state=link src=jenkins/jobs/_extra/seed dest=/mnt2/seed
- force=yes
- sudo: yes
diff --git a/ansible/roles/squid/files/squid.conf b/ansible/roles/squid/files/squid.conf
deleted file mode 100644
index 9a9ce67..0000000
--- a/ansible/roles/squid/files/squid.conf
+++ /dev/null
@@ -1,7165 +0,0 @@
-# WELCOME TO SQUID 3.3.8
-# ----------------------------
-#
-# This is the documentation for the Squid configuration file.
-# This documentation can also be found online at:
-# http://www.squid-cache.org/Doc/config/
-#
-# You may wish to look at the Squid home page and wiki for the
-# FAQ and other documentation:
-# http://www.squid-cache.org/
-# http://wiki.squid-cache.org/SquidFaq
-# http://wiki.squid-cache.org/ConfigExamples
-#
-# This documentation shows what the defaults for various directives
-# happen to be. If you don't need to change the default, you should
-# leave the line out of your squid.conf in most cases.
-#
-# In some cases "none" refers to no default setting at all,
-# while in other cases it refers to the value of the option
-# - the comments for that keyword indicate if this is the case.
-#
-
-# Configuration options can be included using the "include" directive.
-# Include takes a list of files to include. Quoting and wildcards are
-# supported.
-#
-# For example,
-#
-# include /path/to/included/file/squid.acl.config
-#
-# Includes can be nested up to a hard-coded depth of 16 levels.
-# This arbitrary restriction is to prevent recursive include references
-# from causing Squid to enter an infinite loop whilst trying to load
-# configuration files.
-#
-#
-# Conditional configuration
-#
-# If-statements can be used to make configuration directives
-# depend on conditions:
-#
-# if <CONDITION>
-# ... regular configuration directives ...
-# [else
-# ... regular configuration directives ...]
-# endif
-#
-# The else part is optional. The keywords "if", "else", and "endif"
-# must be typed on their own lines, as if they were regular
-# configuration directives.
-#
-# NOTE: An else-if condition is not supported.
-#
-# These individual conditions types are supported:
-#
-# true
-# Always evaluates to true.
-# false
-# Always evaluates to false.
-# <integer> = <integer>
-# Equality comparison of two integer numbers.
-#
-#
-# SMP-Related Macros
-#
-# The following SMP-related preprocessor macros can be used.
-#
-# ${process_name} expands to the current Squid process "name"
-# (e.g., squid1, squid2, or cache1).
-#
-# ${process_number} expands to the current Squid process
-# identifier, which is an integer number (e.g., 1, 2, 3) unique
-# across all Squid processes.
-
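
As a hedged illustration of the conditional syntax and SMP macros described above (not part of this configuration; the paths and worker count are hypothetical), SMP setups commonly give each worker its own cache directory:

    workers 2
    if ${process_number} = 1
    cache_dir rock /srv/squid/rock-1 1024
    else
    cache_dir rock /srv/squid/rock-2 1024
    endif
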
-# TAG: broken_vary_encoding
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: cache_vary
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: collapsed_forwarding
-# This option is not yet supported by Squid-3. see http://bugs.squid-cache.org/show_bug.cgi?id=3495
-#Default:
-# none
-
-# TAG: error_map
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: external_refresh_check
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: ignore_ims_on_miss
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: location_rewrite_program
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: refresh_stale_hit
-# This option is not yet supported by Squid-3.
-#Default:
-# none
-
-# TAG: storeurl_access
-# This option is not yet supported by this version of Squid-3. Please try a later release.
-#Default:
-# none
-
-# TAG: ignore_expect_100
-# Remove this line. The HTTP/1.1 feature is now fully supported by default.
-#Default:
-# none
-
-# TAG: dns_v4_fallback
-# Remove this line. Squid performs a 'Happy Eyeballs' algorithm, the 'fallback' algorithm is no longer relevant.
-#Default:
-# none
-
-# TAG: ftp_list_width
-# Remove this line. Configure FTP page display using the CSS controls in errorpages.css instead.
-#Default:
-# none
-
-# TAG: maximum_single_addr_tries
-# Replaced by connect_retries. The behaviour has changed, please read the documentation before altering.
-#Default:
-# none
-
-# TAG: update_headers
-# Remove this line. The feature is supported by default in storage types where update is implemented.
-#Default:
-# none
-
-# TAG: url_rewrite_concurrency
-# Remove this line. Set the 'concurrency=' option of url_rewrite_children instead.
-#Default:
-# none
-
-# TAG: dns_testnames
-# Remove this line. DNS is no longer tested on startup.
-#Default:
-# none
-
-# TAG: extension_methods
-# Remove this line. All valid methods for HTTP are accepted by default.
-#Default:
-# none
-
-# TAG: zero_buffers
-#Default:
-# none
-
-# TAG: incoming_rate
-#Default:
-# none
-
-# TAG: server_http11
-# Remove this line. HTTP/1.1 is supported by default.
-#Default:
-# none
-
-# TAG: upgrade_http0.9
-# Remove this line. ICY/1.0 streaming protocol is supported by default.
-#Default:
-# none
-
-# TAG: zph_local
-# Alter these entries. Use the qos_flows directive instead.
-#Default:
-# none
-
-# TAG: header_access
-# Since squid-3.0 replace with request_header_access or reply_header_access
-# depending on whether you wish to match client requests or server replies.
-#Default:
-# none
-
-# TAG: httpd_accel_no_pmtu_disc
-# Since squid-3.0 use the 'disable-pmtu-discovery' flag on http_port instead.
-#Default:
-# none
-
-# TAG: wais_relay_host
-# Replace this line with 'cache_peer' configuration.
-#Default:
-# none
-
-# TAG: wais_relay_port
-# Replace this line with 'cache_peer' configuration.
-#Default:
-# none
-
-# OPTIONS FOR AUTHENTICATION
-# -----------------------------------------------------------------------------
-
-# TAG: auth_param
-# This is used to define parameters for the various authentication
-# schemes supported by Squid.
-#
-# format: auth_param scheme parameter [setting]
-#
-# The order in which authentication schemes are presented to the client is
-# dependent on the order the scheme first appears in config file. IE
-# has a bug (it's not RFC 2617 compliant) in that it will use the basic
-# scheme if basic is the first entry presented, even if more secure
-# schemes are presented. For now use the order in the recommended
-# settings section below. If other browsers have difficulties (don't
-# recognize the schemes offered even if you are using basic) either
-# put basic first, or disable the other schemes (by commenting out their
-# program entry).
-#
-# Once an authentication scheme is fully configured, it can only be
-# shutdown by shutting squid down and restarting. Changes can be made on
-# the fly and activated with a reconfigure. I.E. You can change to a
-# different helper, but not unconfigure the helper completely.
-#
-# Please note that while this directive defines how Squid processes
-# authentication it does not automatically activate authentication.
-# To use authentication you must in addition make use of ACLs based
-# on login name in http_access (proxy_auth, proxy_auth_regex or
-# external with %LOGIN used in the format tag). The browser will be
-# challenged for authentication on the first such acl encountered
-# in http_access processing and will also be re-challenged for new
-# login credentials if the request is being denied by a proxy_auth
-# type acl.
-#
-# WARNING: authentication can't be used in a transparently intercepting
-# proxy as the client then thinks it is talking to an origin server and
-# not the proxy. This is a limitation of bending the TCP/IP protocol to
-# transparently intercepting port 80, not a limitation in Squid.
-# Ports flagged 'transparent', 'intercept', or 'tproxy' have
-# authentication disabled.
-#
-# === Parameters for the basic scheme follow. ===
-#
-# "program" cmdline
-# Specify the command for the external authenticator. Such a program
-# reads a line containing "username password" and replies "OK" or
-# "ERR" in an endless loop. "ERR" responses may optionally be followed
-# by an error description available as %m in the returned error page.
-# If you use an authenticator, make sure you have 1 acl of type
-# proxy_auth.
-#
-# By default, the basic authentication scheme is not used unless a
-# program is specified.
-#
-# If you want to use the traditional NCSA proxy authentication, set
-# this line to something like
-#
-# auth_param basic program /usr/lib/squid3/basic_ncsa_auth /usr/etc/passwd
-#
-# "utf8" on|off
-# HTTP uses iso-latin-1 as character set, while some authentication
-# backends such as LDAP expect UTF-8. If this is set to on Squid will
-# translate the HTTP iso-latin-1 charset to UTF-8 before sending the
-# username & password to the helper.
-#
-# "children" numberofchildren [startup=N] [idle=N] [concurrency=N]
-# The maximum number of authenticator processes to spawn. If you start too few
-# Squid will have to wait for them to process a backlog of credential
-# verifications, slowing it down. When password verifications are
-# done via a (slow) network you are likely to need lots of
-# authenticator processes.
-#
-# The startup= and idle= options permit some skew in the exact amount
-# run. A minimum of startup=N will begin during startup and reconfigure.
-# Squid will start more in groups of up to idle=N in an attempt to meet
-# traffic needs and to keep idle=N free above those traffic needs up to
-# the maximum.
-#
-# The concurrency= option sets the number of concurrent requests the
-# helper can process. The default of 0 is used for helpers which only
-# support one request at a time. Setting this to a number greater than
-# 0 changes the protocol used to include a channel number first on the
-# request/response line, allowing multiple requests to be sent to the
-# same helper in parallel without waiting for the response.
-# Must not be set unless it's known the helper supports this.
-#
-# auth_param basic children 20 startup=0 idle=1
-#
-# "realm" realmstring
-# Specifies the realm name which is to be reported to the
-# client for the basic proxy authentication scheme (part of
-# the text the user will see when prompted for their username and
-# password). There is no default.
-# auth_param basic realm Squid proxy-caching web server
-#
-# "credentialsttl" timetolive
-# Specifies how long squid assumes an externally validated
-# username:password pair is valid for - in other words how
-# often the helper program is called for that user. Set this
-# low to force revalidation with short lived passwords. Note
-# setting this high does not impact your susceptibility
-# to replay attacks unless you are using an one-time password
-# system (such as SecureID). If you are using such a system,
-# you will be vulnerable to replay attacks unless you also
-# use the max_user_ip ACL in an http_access rule.
-#
-# "casesensitive" on|off
-# Specifies if usernames are case sensitive. Most user databases are
-# case insensitive allowing the same username to be spelled using both
-# lower and upper case letters, but some are case sensitive. This
-# makes a big difference for user_max_ip ACL processing and similar.
-# auth_param basic casesensitive off
-#
-# === Parameters for the digest scheme follow ===
-#
-# "program" cmdline
-# Specify the command for the external authenticator. Such
-# a program reads a line containing "username":"realm" and
-# replies with the appropriate H(A1) value hex encoded or
-# ERR if the user (or his H(A1) hash) does not exist.
-# See rfc 2616 for the definition of H(A1).
-# "ERR" responses may optionally be followed by a error description
-# available as %m in the returned error page.
-#
-# By default, the digest authentication scheme is not used unless a
-# program is specified.
-#
-# If you want to use a digest authenticator, set this line to
-# something like
-#
-# auth_param digest program /usr/lib/squid3/digest_pw_auth /usr/etc/digpass
-#
-# "utf8" on|off
-# HTTP uses iso-latin-1 as character set, while some authentication
-# backends such as LDAP expect UTF-8. If this is set to on Squid will
-# translate the HTTP iso-latin-1 charset to UTF-8 before sending the
-# username & password to the helper.
-#
-# "children" numberofchildren [startup=N] [idle=N] [concurrency=N]
-# The maximum number of authenticator processes to spawn (default 5).
-# If you start too few Squid will have to wait for them to
-# process a backlog of H(A1) calculations, slowing it down.
-# When the H(A1) calculations are done via a (slow) network
-# you are likely to need lots of authenticator processes.
-#
-# The startup= and idle= options permit some skew in the exact amount
-# run. A minimum of startup=N will begin during startup and reconfigure.
-# Squid will start more in groups of up to idle=N in an attempt to meet
-# traffic needs and to keep idle=N free above those traffic needs up to
-# the maximum.
-#
-# The concurrency= option sets the number of concurrent requests the
-# helper can process. The default of 0 is used for helpers which only
-# support one request at a time. Setting this to a number greater than
-# 0 changes the protocol used to include a channel number first on the
-# request/response line, allowing multiple requests to be sent to the
-# same helper in parallel without waiting for the response.
-# Must not be set unless it's known the helper supports this.
-#
-# auth_param digest children 20 startup=0 idle=1
-#
-# "realm" realmstring
-# Specifies the realm name which is to be reported to the
-# client for the digest proxy authentication scheme (part of
-# the text the user will see when prompted for their username and
-# password). There is no default.
-# auth_param digest realm Squid proxy-caching web server
-#
-# "nonce_garbage_interval" timeinterval
-# Specifies the interval that nonces that have been issued
-# to client_agent's are checked for validity.
-#
-# "nonce_max_duration" timeinterval
-# Specifies the maximum length of time a given nonce will be
-# valid for.
-#
-# "nonce_max_count" number
-# Specifies the maximum number of times a given nonce can be
-# used.
-#
-# "nonce_strictness" on|off
-# Determines if squid requires strict increment-by-1 behavior
-# for nonce counts, or just incrementing (off - for use when
-# user agents generate nonce counts that occasionally miss 1
-# (ie, 1,2,4,6)). Default off.
-#
-# "check_nonce_count" on|off
-# This directive if set to off can disable the nonce count check
-# completely to work around buggy digest qop implementations in
-# certain mainstream browser versions. Default on to check the
-# nonce count to protect from authentication replay attacks.
-#
-# "post_workaround" on|off
-# This is a workaround for certain buggy browsers which send
-# an incorrect request digest in POST requests when reusing
-# the same nonce as acquired earlier on a GET request.
-#
-# === NTLM scheme options follow ===
-#
-# "program" cmdline
-# Specify the command for the external NTLM authenticator.
-# Such a program reads exchanged NTLMSSP packets with
-# the browser via Squid until authentication is completed.
-# If you use an NTLM authenticator, make sure you have 1 acl
-# of type proxy_auth. By default, the NTLM authenticator_program
-# is not used.
-#
-# NOTE: In Debian the ntlm_auth program is distributed in the winbindd package
-# which is required for this auth scheme to work
-#
-# auth_param ntlm program /usr/bin/ntlm_auth
-#
-# "children" numberofchildren [startup=N] [idle=N]
-# The maximum number of authenticator processes to spawn (default 5).
-# If you start too few Squid will have to wait for them to
-# process a backlog of credential verifications, slowing it
-# down. When credential verifications are done via a (slow)
-# network you are likely to need lots of authenticator
-# processes.
-#
-# The startup= and idle= options permit some skew in the exact amount
-# run. A minimum of startup=N will begin during startup and reconfigure.
-# Squid will start more in groups of up to idle=N in an attempt to meet
-# traffic needs and to keep idle=N free above those traffic needs up to
-# the maximum.
-#
-# auth_param ntlm children 20 startup=0 idle=1
-#
-# "keep_alive" on|off
-# If you experience problems with PUT/POST requests when using the
-# Negotiate authentication scheme then you can try setting this to
-# off. This will cause Squid to forcibly close the connection on
-# the initial requests where the browser asks which schemes are
-# supported by the proxy.
-#
-# auth_param ntlm keep_alive on
-#
-# === Options for configuring the NEGOTIATE auth-scheme follow ===
-#
-# "program" cmdline
-# Specify the command for the external Negotiate authenticator.
-# This protocol is used in Microsoft Active-Directory enabled setups with
-# the Microsoft Internet Explorer or Mozilla Firefox browsers.
-# Its main purpose is to exchange credentials with the Squid proxy
-# using the Kerberos mechanisms.
-# If you use a Negotiate authenticator, make sure you have at least
-# one acl of type proxy_auth active. By default, the negotiate
-# authenticator_program is not used.
-# The only supported program for this role is the ntlm_auth
-# program distributed as part of Samba, version 4 or later.
-#
-# NOTE: In Debian the ntlm_auth program is distributed in the winbindd package
-# which is required for this auth scheme to work
-#
-# auth_param negotiate program /usr/bin/ntlm_auth --helper-protocol=gss-spnego
-#
-# "children" numberofchildren [startup=N] [idle=N]
-# The maximum number of authenticator processes to spawn (default 5).
-# If you start too few Squid will have to wait for them to
-# process a backlog of credential verifications, slowing it
-# down. When credential verifications are done via a (slow)
-# network you are likely to need lots of authenticator
-# processes.
-#
-# The startup= and idle= options permit some skew in the exact amount
-# run. A minimum of startup=N will begin during startup and reconfigure.
-# Squid will start more in groups of up to idle=N in an attempt to meet
-# traffic needs and to keep idle=N free above those traffic needs up to
-# the maximum.
-#
-# auth_param negotiate children 20 startup=0 idle=1
-#
-# "keep_alive" on|off
-# If you experience problems with PUT/POST requests when using the
-# Negotiate authentication scheme then you can try setting this to
-# off. This will cause Squid to forcibly close the connection on
-# the initial requests where the browser asks which schemes are
-# supported by the proxy.
-#
-# auth_param negotiate keep_alive on
-#
-#
-# Examples:
-#
-##Recommended minimum configuration per scheme:
-##auth_param negotiate program <uncomment and complete this line to activate>
-##auth_param negotiate children 20 startup=0 idle=1
-##auth_param negotiate keep_alive on
-##
-##auth_param ntlm program <uncomment and complete this line to activate>
-##auth_param ntlm children 20 startup=0 idle=1
-##auth_param ntlm keep_alive on
-##
-##auth_param digest program <uncomment and complete this line>
-##auth_param digest children 20 startup=0 idle=1
-##auth_param digest realm Squid proxy-caching web server
-##auth_param digest nonce_garbage_interval 5 minutes
-##auth_param digest nonce_max_duration 30 minutes
-##auth_param digest nonce_max_count 50
-##
-##auth_param basic program <uncomment and complete this line>
-##auth_param basic children 5 startup=5 idle=1
-##auth_param basic realm Squid proxy-caching web server
-##auth_param basic credentialsttl 2 hours
-#Default:
-# none
-
-# TAG: authenticate_cache_garbage_interval
-# The time period between garbage collection across the username cache.
-# This is a trade-off between memory utilization (long intervals - say
-# 2 days) and CPU (short intervals - say 1 minute). Only change if you
-# have good reason to.
-#Default:
-# authenticate_cache_garbage_interval 1 hour
-
-# TAG: authenticate_ttl
-# The time a user & their credentials stay in the logged in
-# user cache since their last request. When the garbage
-# interval passes, all user credentials that have passed their
-# TTL are removed from memory.
-#Default:
-# authenticate_ttl 1 hour
-
-# TAG: authenticate_ip_ttl
-# If you use proxy authentication and the 'max_user_ip' ACL,
-# this directive controls how long Squid remembers the IP
-# addresses associated with each user. Use a small value
-# (e.g., 60 seconds) if your users might change addresses
-# quickly, as is the case with dialup. You might be safe
-# using a larger value (e.g., 2 hours) in a corporate LAN
-# environment with relatively static address assignments.
-#Default:
-# authenticate_ip_ttl 0 seconds
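-#
-# A hedged illustration only, not active configuration: on a corporate
-# LAN with relatively static addresses (as suggested above), one might
-# set, e.g.:
-#
-# authenticate_ttl 1 hour
-# authenticate_ip_ttl 2 hours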
-
-# ACCESS CONTROLS
-# -----------------------------------------------------------------------------
-
-# TAG: external_acl_type
-# This option defines external acl classes using a helper program
-# to look up the status
-#
-# external_acl_type name [options] FORMAT.. /path/to/helper [helper arguments..]
-#
-# Options:
-#
-# ttl=n TTL in seconds for cached results (defaults to 3600
-# for 1 hour)
-# negative_ttl=n
-# TTL for cached negative lookups (default same
-# as ttl)
-# children-max=n
-# Maximum number of acl helper processes spawned to service
-# external acl lookups of this type. (default 20)
-# children-startup=n
-# Minimum number of acl helper processes to spawn during
-# startup and reconfigure to service external acl lookups
-# of this type. (default 0)
-# children-idle=n
-# Number of acl helper processes to keep ahead of traffic
-# loads. Squid will spawn this many at once whenever load
-# rises above the capabilities of existing processes.
-# Up to the value of children-max. (default 1)
-# concurrency=n concurrency level per process. Only used with helpers
-# capable of processing more than one query at a time.
-# cache=n limit the result cache size, default is unbounded.
-# grace=n Percentage remaining of TTL where a refresh of a
-# cached entry should be initiated without needing to
-# wait for a new reply. (default is for no grace period)
-# protocol=2.5 Compatibility mode for Squid-2.5 external acl helpers
-# ipv4 / ipv6 IP protocol used to communicate with this helper.
-# The default is to auto-detect IPv6 and use it when available.
-#
-# FORMAT specifications
-#
-# %LOGIN Authenticated user login name
-# %EXT_USER Username from previous external acl
-# %EXT_LOG Log details from previous external acl
-# %EXT_TAG Tag from previous external acl
-# %IDENT Ident user name
-# %SRC Client IP
-# %SRCPORT Client source port
-# %URI Requested URI
-# %DST Requested host
-# %PROTO Requested protocol
-# %PORT Requested port
-# %PATH Requested URL path
-# %METHOD Request method
-# %MYADDR Squid interface address
-# %MYPORT Squid http_port number
-# %PATH Requested URL-path (including query-string if any)
-# %USER_CERT SSL User certificate in PEM format
-# %USER_CERTCHAIN SSL User certificate chain in PEM format
-# %USER_CERT_xx SSL User certificate subject attribute xx
-# %USER_CA_xx SSL User certificate issuer attribute xx
-#
-# %>{Header} HTTP request header "Header"
-# %>{Hdr:member}
-# HTTP request header "Hdr" list member "member"
-# %>{Hdr:;member}
-# HTTP request header list member using ; as
-# list separator. ; can be any non-alphanumeric
-# character.
-#
-# %<{Header} HTTP reply header "Header"
-# %<{Hdr:member}
-# HTTP reply header "Hdr" list member "member"
-# %<{Hdr:;member}
-# HTTP reply header list member using ; as
-# list separator. ; can be any non-alphanumeric
-# character.
-#
-# %ACL The name of the ACL being tested.
-# %DATA The ACL arguments. If not used then any arguments
-# are automatically added at the end of the line
-# sent to the helper.
-# NOTE: this will encode the arguments as one token,
-# whereas the default will pass each separately.
-#
-# %% The percent sign. Useful for helpers which need
-# an unchanging input format.
-#
-# In addition to the above, any string specified in the referencing
-# acl will also be included in the helper request line, after the
-# specified formats (see the "acl external" directive)
-#
-# The helper receives lines per the above format specification,
-# and returns lines starting with OK or ERR indicating the validity
-# of the request and optionally followed by additional keywords with
-# more details.
-#
-# General result syntax:
-#
-# OK/ERR keyword=value ...
-#
-# Defined keywords:
-#
-# user= The user's name (login)
-# password= The user's password (for the login= cache_peer option)
-# message= Message describing the reason. Available as %o
-# in error pages
-# tag= Apply a tag to a request (for both ERR and OK results)
-# Only sets a tag, does not alter existing tags.
-# log= String to be logged in access.log. Available as
-# %ea in logformat specifications
-#
-# If protocol=3.0 (the default) then URL escaping is used to protect
-# each value in both requests and responses.
-#
-# If using protocol=2.5 then all values need to be enclosed in quotes
-# if they may contain whitespace, or the whitespace escaped using \.
-# And quotes or \ characters within the keyword value must be \ escaped.
-#
-# When using the concurrency= option the protocol is changed by
-# introducing a query channel tag in front of the request/response.
-# The query channel tag is a number between 0 and concurrency-1.
-#Default:
-# none
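-#
-# A hedged sketch only (the helper path, class name and acl names below
-# are placeholders, not part of this configuration): define an external
-# acl class per the FORMAT specification above and reference it from an
-# "external" acl.
-#
-# external_acl_type ldap_group ttl=600 children-max=5 %LOGIN /usr/local/bin/check_group
-# acl allowed_group external ldap_group proxy-users
-# http_access allow allowed_group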
-
-# TAG: acl
-# Defining an Access List
-#
-# Every access list definition must begin with an aclname and acltype,
-# followed by either type-specific arguments or a quoted filename that
-# they are read from.
-#
-# acl aclname acltype argument ...
-# acl aclname acltype "file" ...
-#
-# When using "file", the file should contain one item per line.
-#
-# By default, regular expressions are CASE-SENSITIVE.
-# To make them case-insensitive, use the -i option. To return to case-sensitive
-# matching, use the +i option between patterns, or make a new ACL line without -i.
-#
-# Some acl types require suspending the current request in order
-# to access some external data source.
-# Those which do are marked with the tag [slow], those which
-# don't are marked as [fast].
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl
-# for further information
-#
-# ***** ACL TYPES AVAILABLE *****
-#
-# acl aclname src ip-address/mask ... # client's IP address [fast]
-# acl aclname src addr1-addr2/mask ... # range of addresses [fast]
-# acl aclname dst ip-address/mask ... # URL host's IP address [slow]
-# acl aclname localip ip-address/mask ... # IP address the client connected to [fast]
-#
-# acl aclname arp mac-address ... (xx:xx:xx:xx:xx:xx notation)
-# # The arp ACL requires the special configure option --enable-arp-acl.
-# # Furthermore, the ARP ACL code is not portable to all operating systems.
-# # It works on Linux, Solaris, Windows, FreeBSD, and some
-# # other *BSD variants.
-# # [fast]
-# #
-# # NOTE: Squid can only determine the MAC address for clients that are on
-# # the same subnet. If the client is on a different subnet,
-# # then Squid cannot find out its MAC address.
-#
-# acl aclname srcdomain .foo.com ...
-# # reverse lookup, from client IP [slow]
-# acl aclname dstdomain .foo.com ...
-# # Destination server from URL [fast]
-# acl aclname srcdom_regex [-i] \.foo\.com ...
-# # regex matching client name [slow]
-# acl aclname dstdom_regex [-i] \.foo\.com ...
-# # regex matching server [fast]
-# #
-# # For dstdomain and dstdom_regex a reverse lookup is tried if an IP
-# # based URL is used and no match is found. The name "none" is used
-# # if the reverse lookup fails.
-#
-# acl aclname src_as number ...
-# acl aclname dst_as number ...
-# # [fast]
-# # Except for access control, AS numbers can be used for
-# # routing of requests to specific caches. Here's an
-# # example for routing all requests for AS#1241 and only
-# # those to mycache.mydomain.net:
-# # acl asexample dst_as 1241
-# # cache_peer_access mycache.mydomain.net allow asexample
-# # cache_peer_access mycache.mydomain.net deny all
-#
-# acl aclname peername myPeer ...
-# # [fast]
-# # match against a named cache_peer entry
-# # set unique name= on cache_peer lines for reliable use.
-#
-# acl aclname time [day-abbrevs] [h1:m1-h2:m2]
-# # [fast]
-# # day-abbrevs:
-# # S - Sunday
-# # M - Monday
-# # T - Tuesday
-# # W - Wednesday
-# # H - Thursday
-# # F - Friday
-# # A - Saturday
-# # h1:m1 must be less than h2:m2
-#
-# acl aclname url_regex [-i] ^http:// ...
-# # regex matching on whole URL [fast]
-# acl aclname urllogin [-i] [^a-zA-Z0-9] ...
-# # regex matching on URL login field
-# acl aclname urlpath_regex [-i] \.gif$ ...
-# # regex matching on URL path [fast]
-#
-# acl aclname port 80 70 21 0-1024... # destination TCP port [fast]
-# # ranges are allowed
-# acl aclname localport 3128 ... # TCP port the client connected to [fast]
-# # NP: for interception mode this is usually '80'
-#
-# acl aclname myportname 3128 ... # http(s)_port name [fast]
-#
-# acl aclname proto HTTP FTP ... # request protocol [fast]
-#
-# acl aclname method GET POST ... # HTTP request method [fast]
-#
-# acl aclname http_status 200 301 500- 400-403 ...
-# # status code in reply [fast]
-#
-# acl aclname browser [-i] regexp ...
-# # pattern match on User-Agent header (see also req_header below) [fast]
-#
-# acl aclname referer_regex [-i] regexp ...
-# # pattern match on Referer header [fast]
-# # Referer is highly unreliable, so use with care
-#
-# acl aclname ident username ...
-# acl aclname ident_regex [-i] pattern ...
-# # string match on ident output [slow]
-# # use REQUIRED to accept any non-null ident.
-#
-# acl aclname proxy_auth [-i] username ...
-# acl aclname proxy_auth_regex [-i] pattern ...
-# # perform http authentication challenge to the client and match against
-# # supplied credentials [slow]
-# #
-# # takes a list of allowed usernames.
-# # use REQUIRED to accept any valid username.
-# #
-# # Will use proxy authentication in forward-proxy scenarios, and plain
-# # http authentication in reverse-proxy scenarios
-# #
-# # NOTE: when a Proxy-Authentication header is sent but it is not
-# # needed during ACL checking the username is NOT logged
-# # in access.log.
-# #
-# # NOTE: proxy_auth requires a EXTERNAL authentication program
-# # to check username/password combinations (see
-# # auth_param directive).
-# #
-# # NOTE: proxy_auth can't be used in a transparent/intercepting proxy
-# # as the browser needs to be configured for using a proxy in order
-# # to respond to proxy authentication.
-#
-# acl aclname snmp_community string ...
-# # A community string to limit access to your SNMP Agent [fast]
-# # Example:
-# #
-# # acl snmppublic snmp_community public
-#
-# acl aclname maxconn number
-# # This will be matched when the client's IP address has
-# # more than <number> TCP connections established. [fast]
-# # NOTE: This only measures direct TCP links so X-Forwarded-For
-# # indirect clients are not counted.
-#
-# acl aclname max_user_ip [-s] number
-# # This will be matched when the user attempts to log in from more
-# # than <number> different ip addresses. The authenticate_ip_ttl
-# # parameter controls the timeout on the ip entries. [fast]
-# # If -s is specified the limit is strict, denying browsing
-# # from any further IP addresses until the ttl has expired. Without
-# # -s Squid will just annoy the user by "randomly" denying requests.
-# # (the counter is reset each time the limit is reached and a
-# # request is denied)
-# # NOTE: in acceleration mode or where there is mesh of child proxies,
-# # clients may appear to come from multiple addresses if they are
-# # going through proxy farms, so a limit of 1 may cause user problems.
-#
-# acl aclname random probability
-# # Pseudo-randomly match requests. Based on the probability given.
-# # Probability may be written as a decimal (0.333), fraction (1/3)
-# # or ratio of matches:non-matches (3:5).
-#
-# acl aclname req_mime_type [-i] mime-type ...
-# # regex match against the mime type of the request generated
-# # by the client. Can be used to detect file upload or some
-# # types of HTTP tunneling requests [fast]
-# # NOTE: This does NOT match the reply. You cannot use this
-# # to match the returned file type.
-#
-# acl aclname req_header header-name [-i] any\.regex\.here
-# # regex match against any of the known request headers. May be
-# # thought of as a superset of "browser", "referer" and "mime-type"
-# # ACL [fast]
-#
-# acl aclname rep_mime_type [-i] mime-type ...
-# # regex match against the mime type of the reply received by
-# # squid. Can be used to detect file download or some
-# # types of HTTP tunneling requests. [fast]
-# # NOTE: This has no effect in http_access rules. It only has
-# # effect in rules that affect the reply data stream such as
-# # http_reply_access.
-#
-# acl aclname rep_header header-name [-i] any\.regex\.here
-# # regex match against any of the known reply headers. May be
-# # thought of as a superset of "browser", "referer" and "mime-type"
-# # ACLs [fast]
-#
-# acl aclname external class_name [arguments...]
-# # external ACL lookup via a helper class defined by the
-# # external_acl_type directive [slow]
-#
-# acl aclname user_cert attribute values...
-# # match against attributes in a user SSL certificate
-# # attribute is one of DN/C/O/CN/L/ST [fast]
-#
-# acl aclname ca_cert attribute values...
-# # match against attributes in a user's issuing CA SSL certificate
-# # attribute is one of DN/C/O/CN/L/ST [fast]
-#
-# acl aclname ext_user username ...
-# acl aclname ext_user_regex [-i] pattern ...
-# # string match on username returned by external acl helper [slow]
-# # use REQUIRED to accept any non-null user name.
-#
-# acl aclname tag tagvalue ...
-# # string match on tag returned by external acl helper [slow]
-#
-# acl aclname hier_code codename ...
-# # string match against squid hierarchy code(s); [fast]
-# # e.g., DIRECT, PARENT_HIT, NONE, etc.
-# #
-# # NOTE: This has no effect in http_access rules. It only has
-# # effect in rules that affect the reply data stream such as
-# # http_reply_access.
-#
-#
-# Examples:
-# acl macaddress arp 09:00:2b:23:45:67
-# acl myexample dst_as 1241
-# acl password proxy_auth REQUIRED
-# acl fileupload req_mime_type -i ^multipart/form-data$
-# acl javascript rep_mime_type -i ^application/x-javascript$
-#
-#Default:
-# ACLs all, manager, localhost, and to_localhost are predefined.
-#
-#
-# Recommended minimum configuration:
-#
-
-# Example rule allowing access from your local networks.
-# Adapt to list your (internal) IP networks from where browsing
-# should be allowed
-acl localnet src 10.0.0.0/8 # RFC1918 possible internal network
-#acl localnet src 172.16.0.0/12 # RFC1918 possible internal network
-#acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
-#acl localnet src fc00::/7 # RFC 4193 local private network range
-#acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
-acl safe_dst_host dstdomain .linaro.org .kernel.org security.ubuntu.com private-ppa.launchpad.net ppa.launchpad.net .archive.ubuntu.com ports.ubuntu.com .googlesource.com
-
-acl SSL_ports port 443
-acl Safe_ports port 80 # http
-acl Safe_ports port 21 # ftp
-acl Safe_ports port 443 # https
-acl Safe_ports port 70 # gopher
-acl Safe_ports port 210 # wais
-acl Safe_ports port 1025-65535 # unregistered ports
-acl Safe_ports port 280 # http-mgmt
-acl Safe_ports port 488 # gss-http
-acl Safe_ports port 591 # filemaker
-acl Safe_ports port 777 # multiling http
-acl CONNECT method CONNECT
-
-# TAG: follow_x_forwarded_for
-# Allowing or Denying the X-Forwarded-For header to be followed to
-# find the original source of a request.
-#
-# Requests may pass through a chain of several other proxies
-# before reaching us. The X-Forwarded-For header will contain a
-# comma-separated list of the IP addresses in the chain, with the
-# rightmost address being the most recent.
-#
-# If a request reaches us from a source that is allowed by this
-# configuration item, then we consult the X-Forwarded-For header
-# to see where that host received the request from. If the
-# X-Forwarded-For header contains multiple addresses, we continue
-# backtracking until we reach an address for which we are not allowed
-# to follow the X-Forwarded-For header, or until we reach the first
-# address in the list. For the purposes of ACLs used in the
-# follow_x_forwarded_for directive, the src ACL type always matches
-# the address we are testing and srcdomain matches its rDNS.
-#
-# The end result of this process is an IP address that we will
-# refer to as the indirect client address. This address may
-# be treated as the client address for access control, ICAP, delay
-# pools and logging, depending on the acl_uses_indirect_client,
-# icap_uses_indirect_client, delay_pool_uses_indirect_client,
-# log_uses_indirect_client and tproxy_uses_indirect_client options.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-# SECURITY CONSIDERATIONS:
-#
-# Any host for which we follow the X-Forwarded-For header
-# can place incorrect information in the header, and Squid
-# will use the incorrect information as if it were the
-# source address of the request. This may enable remote
-# hosts to bypass any access control restrictions that are
-# based on the client's source addresses.
-#
-# For example:
-#
-# acl localhost src 127.0.0.1
-# acl my_other_proxy srcdomain .proxy.example.com
-# follow_x_forwarded_for allow localhost
-# follow_x_forwarded_for allow my_other_proxy
-#Default:
-# X-Forwarded-For header will be ignored.
-
-# TAG: acl_uses_indirect_client on|off
-# Controls whether the indirect client address
-# (see follow_x_forwarded_for) is used instead of the
-# direct client address in acl matching.
-#
-# NOTE: the maxconn ACL considers direct TCP links only; indirect
-# clients always count as zero, so they never match.
-#Default:
-# acl_uses_indirect_client on
-
-# TAG: delay_pool_uses_indirect_client on|off
-# Controls whether the indirect client address
-# (see follow_x_forwarded_for) is used instead of the
-# direct client address in delay pools.
-#Default:
-# delay_pool_uses_indirect_client on
-
-# TAG: log_uses_indirect_client on|off
-# Controls whether the indirect client address
-# (see follow_x_forwarded_for) is used instead of the
-# direct client address in the access log.
-#Default:
-# log_uses_indirect_client on
-
-# TAG: tproxy_uses_indirect_client on|off
-# Controls whether the indirect client address
-# (see follow_x_forwarded_for) is used instead of the
-# direct client address when spoofing the outgoing client.
-#
-# This has no effect on requests arriving in non-tproxy
-# mode ports.
-#
-# SECURITY WARNING: Usage of this option is dangerous
-# and should not be used trivially. Correct configuration
-# of follow_x_forwarded_for with a limited set of trusted
-# sources is required to prevent abuse of your proxy.
-#Default:
-# tproxy_uses_indirect_client off
-
-# TAG: http_access
-# Allowing or Denying access based on defined access lists
-#
-# Access to the HTTP port:
-# http_access allow|deny [!]aclname ...
-#
-# NOTE on default values:
-#
-# If there are no "access" lines present, the default is to deny
-# the request.
-#
-# If none of the "access" lines cause a match, the default is the
-# opposite of the last line in the list. If the last line was
-# deny, the default is allow. Conversely, if the last line
-# is allow, the default will be deny. For these reasons, it is a
-# good idea to have a "deny all" entry at the end of your access
-# lists to avoid potential confusion.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-#Default:
-# Deny, unless rules exist in squid.conf.
-#
-
-#
-# Recommended minimum Access Permission configuration:
-#
-# Deny requests to certain unsafe ports
-http_access deny !Safe_ports
-
-# Deny CONNECT to other than secure SSL ports
-http_access deny CONNECT !SSL_ports
-
-# Only allow cachemgr access from localhost
-http_access allow localhost manager
-http_access deny manager
-
-# We strongly recommend the following be uncommented to protect innocent
-# web applications running on the proxy server that assume the only
-# one who can access services on "localhost" is a local user
-#http_access deny to_localhost
-
-#
-# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
-#
-
-# Example rule allowing access from your local networks.
-# Adapt localnet in the ACL section to list your (internal) IP networks
-# from where browsing should be allowed
-http_access deny !safe_dst_host
-http_access allow localnet
-http_access allow localhost
-
-# And finally deny all other access to this proxy
-http_access deny all
-
-# TAG: adapted_http_access
-# Allowing or Denying access based on defined access lists
-#
-# Essentially identical to http_access, but runs after redirectors
-# and ICAP/eCAP adaptation, allowing access control based on their
-# output.
-#
-# If not set then only http_access is used.
-#Default:
-# Allow, unless rules exist in squid.conf.
-
-# TAG: http_reply_access
-# Allow replies to client requests. This is complementary to http_access.
-#
-# http_reply_access allow|deny [!] aclname ...
-#
-# NOTE: if there are no access lines present, the default is to allow
-# all replies.
-#
-# If none of the access lines cause a match the opposite of the
-# last line will apply. Thus it is good practice to end the rules
-# with an "allow all" or "deny all" entry.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Allow, unless rules exist in squid.conf.
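-#
-# A hedged illustration only, not enabled here, reusing the javascript
-# rep_mime_type acl shown in the acl examples above:
-#
-# http_reply_access deny javascript
-# http_reply_access allow all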
-
-# TAG: icp_access
-# Allowing or Denying access to the ICP port based on defined
-# access lists
-#
-# icp_access allow|deny [!]aclname ...
-#
-# NOTE: The default if no icp_access lines are present is to
-# deny all traffic. This default may cause problems with peers
-# using ICP.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-## Allow ICP queries from local networks only
-##icp_access allow localnet
-##icp_access deny all
-#Default:
-# Deny, unless rules exist in squid.conf.
-
-# TAG: htcp_access
-# Allowing or Denying access to the HTCP port based on defined
-# access lists
-#
-# htcp_access allow|deny [!]aclname ...
-#
-# See also htcp_clr_access for details on access control for
-# cache purge (CLR) HTCP messages.
-#
-# NOTE: The default if no htcp_access lines are present is to
-# deny all traffic. This default may cause problems with peers
-# using the htcp option.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-## Allow HTCP queries from local networks only
-##htcp_access allow localnet
-##htcp_access deny all
-#Default:
-# Deny, unless rules exist in squid.conf.
-
-# TAG: htcp_clr_access
-# Allowing or Denying access to purge content using HTCP based
-# on defined access lists.
-# See htcp_access for details on general HTCP access control.
-#
-# htcp_clr_access allow|deny [!]aclname ...
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-## Allow HTCP CLR requests from trusted peers
-#acl htcp_clr_peer src 192.0.2.2 2001:DB8::2
-#htcp_clr_access allow htcp_clr_peer
-#htcp_clr_access deny all
-#Default:
-# Deny, unless rules exist in squid.conf.
-
-# TAG: miss_access
-# Determines whether network access is permitted when satisfying a request.
-#
-# For example;
-# to force your neighbors to use you as a sibling instead of
-# a parent.
-#
-# acl localclients src 192.0.2.0/24 2001:DB8::a:0/64
-# miss_access deny !localclients
-# miss_access allow all
-#
-# This means only your local clients are allowed to fetch relayed/MISS
-# replies from the network and all other clients can only fetch cached
-# objects (HITs).
-#
-# The default for this setting allows all clients who passed the
-# http_access rules to relay via this proxy.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Allow, unless rules exist in squid.conf.
-
-# TAG: ident_lookup_access
-# A list of ACL elements which, if matched, cause an ident
-# (RFC 931) lookup to be performed for this request. For
-# example, you might choose to always perform ident lookups
-# for your main multi-user Unix boxes, but not for your Macs
-# and PCs. By default, ident lookups are not performed for
-# any requests.
-#
-# To enable ident lookups for specific client addresses, you
-# can follow this example:
-#
-# acl ident_aware_hosts src 192.168.1.0/24
-# ident_lookup_access allow ident_aware_hosts
-# ident_lookup_access deny all
-#
-# Only src type ACL checks are fully supported. A srcdomain
-# ACL might work at times, but it will not always provide
-# the correct result.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Unless rules exist in squid.conf, IDENT is not fetched.
-
-# TAG: reply_body_max_size size [acl acl...]
-# This option specifies the maximum size of a reply body. It can be
-# used to prevent users from downloading very large files, such as
-# MP3's and movies. When the reply headers are received, the
-# reply_body_max_size lines are processed, and the first line where
-# all (if any) listed ACLs are true is used as the maximum body size
-# for this reply.
-#
-# This size is checked twice. First when we get the reply headers,
-# we check the content-length value. If the content length value exists
-# and is larger than the allowed size, the request is denied and the
-# user receives an error message that says "the request or reply
-# is too large." If there is no content-length, and the reply
-# size exceeds this limit, the client's connection is just closed
-# and they will receive a partial reply.
-#
-# WARNING: downstream caches probably can not detect a partial reply
-# if there is no content-length header, so they will cache
-# partial responses and give them out as hits. You should NOT
-# use this option if you have downstream caches.
-#
-# WARNING: A maximum size smaller than the size of squid's error messages
-# will cause an infinite loop and crash squid. Ensure that the smallest
-# non-zero value you use is greater than the maximum header size plus
-# the size of your largest error page.
-#
-# If you set this parameter none (the default), there will be
-# no limit imposed.
-#
-# Configuration Format is:
-# reply_body_max_size SIZE UNITS [acl ...]
-# i.e.
-# reply_body_max_size 10 MB
-#
-#Default:
-# No limit is applied.
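-#
-# A hedged example only (the size is a placeholder): cap reply bodies
-# for clients matching the localnet acl defined above, per the format
-# shown:
-#
-# reply_body_max_size 100 MB localnet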
-
-# NETWORK OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: http_port
-# Usage: port [mode] [options]
-# hostname:port [mode] [options]
-# 1.2.3.4:port [mode] [options]
-#
-# The socket addresses where Squid will listen for HTTP client
-# requests. You may specify multiple socket addresses.
-# There are three forms: port alone, hostname with port, and
-# IP address with port. If you specify a hostname or IP
-# address, Squid binds the socket to that specific
-# address. Most likely, you do not need to bind to a specific
-# address, so you can use the port number alone.
-#
-# If you are running Squid in accelerator mode, you
-# probably want to listen on port 80 also, or instead.
-#
-# The -a command line option may be used to specify additional
-# port(s) where Squid listens for proxy requests. Such ports will
-# be plain proxy ports with no options.
-#
-# You may specify multiple socket addresses on multiple lines.
-#
-# Modes:
-#
-# intercept Support for IP-Layer interception of
-# outgoing requests without browser settings.
-# NP: disables authentication and IPv6 on the port.
-#
-# tproxy Support Linux TPROXY for spoofing outgoing
-# connections using the client IP address.
-# NP: disables authentication and maybe IPv6 on the port.
-#
-# accel Accelerator / reverse proxy mode
-#
-# ssl-bump For each CONNECT request allowed by ssl_bump ACLs,
-# establish secure connection with the client and with
-# the server, decrypt HTTPS messages as they pass through
-# Squid, and treat them as unencrypted HTTP messages,
-# becoming the man-in-the-middle.
-#
-# The ssl_bump option is required to fully enable
-# bumping of CONNECT requests.
-#
-# Omitting the mode flag causes default forward proxy mode to be used.
-#
-#
-# Accelerator Mode Options:
-#
-# defaultsite=domainname
-# What to use for the Host: header if it is not present
-# in a request. Determines what site (not origin server)
-# accelerators should consider the default.
-#
-# no-vhost Disable using HTTP/1.1 Host header for virtual domain support.
-#
-# protocol= Protocol to reconstruct accelerated requests with.
-# Defaults to http for http_port and https for
-# https_port
-#
-# vport Virtual host port support. Using the http_port number
-# instead of the port passed on Host: headers.
-#
-# vport=NN Virtual host port support. Using the specified port
-# number instead of the port passed on Host: headers.
-#
-# act-as-origin
-# Act as if this Squid is the origin server.
-# This currently means generate new Date: and Expires:
-# headers on HIT instead of adding Age:.
-#
-# ignore-cc Ignore request Cache-Control headers.
-#
-# WARNING: This option violates HTTP specifications if
-# used in non-accelerator setups.
-#
-# allow-direct Allow direct forwarding in accelerator mode. Normally
-# accelerated requests are denied direct forwarding as if
-# never_direct was used.
-#
-# WARNING: this option opens accelerator mode to security
-# vulnerabilities usually only affecting interception
-# mode. Make sure to protect forwarding with suitable
-# http_access rules when using this.
-#
-#
-# SSL Bump Mode Options:
-# In addition to these options ssl-bump requires TLS/SSL options.
-#
-# generate-host-certificates[=<on|off>]
-# Dynamically create SSL server certificates for the
-# destination hosts of bumped CONNECT requests. When
-# enabled, the cert and key options are used to sign
-# generated certificates. Otherwise the generated
-# certificate will be self-signed.
-# If there is a CA certificate, the lifetime of the generated
-# certificate equals the lifetime of the CA certificate. If the
-# generated certificate is self-signed, its lifetime is three
-# years.
-# This option is enabled by default when ssl-bump is used.
-# See the ssl-bump option above for more information.
-#
-# dynamic_cert_mem_cache_size=SIZE
-# Approximate total RAM size spent on cached generated
-# certificates. If set to zero, caching is disabled. The
-# default value is 4MB.
-#
-# TLS / SSL Options:
-#
-# cert= Path to SSL certificate (PEM format).
-#
-# key= Path to SSL private key file (PEM format)
-# if not specified, the certificate file is
-# assumed to be a combined certificate and
-# key file.
-#
-# version= The version of SSL/TLS supported
-# 1 automatic (default)
-# 2 SSLv2 only
-# 3 SSLv3 only
-# 4 TLSv1.0 only
-# 5 TLSv1.1 only
-# 6 TLSv1.2 only
-#
-# cipher= Colon separated list of supported ciphers.
-# NOTE: some ciphers such as EDH ciphers depend on
-# additional settings. If those settings are
-# omitted the ciphers may be silently ignored
-# by the OpenSSL library.
-#
-# options= Various SSL implementation options. The most important
-# being:
-# NO_SSLv2 Disallow the use of SSLv2
-# NO_SSLv3 Disallow the use of SSLv3
-# NO_TLSv1 Disallow the use of TLSv1.0
-# NO_TLSv1_1 Disallow the use of TLSv1.1
-# NO_TLSv1_2 Disallow the use of TLSv1.2
-# SINGLE_DH_USE Always create a new key when using
-# temporary/ephemeral DH key exchanges
-# ALL Enable various bug workarounds
-# suggested as "harmless" by OpenSSL
-# Be warned that this reduces SSL/TLS
-# strength to some attacks.
-# See OpenSSL SSL_CTX_set_options documentation for a
-# complete list of options.
-#
-# clientca= File containing the list of CAs to use when
-# requesting a client certificate.
-#
-# cafile= File containing additional CA certificates to
-# use when verifying client certificates. If unset
-# clientca will be used.
-#
-# capath= Directory containing additional CA certificates
-# and CRL lists to use when verifying client certificates.
-#
-# crlfile= File of additional CRL lists to use when verifying
-# the client certificate, in addition to CRLs stored in
-# the capath. Implies VERIFY_CRL flag below.
-#
-# dhparams= File containing DH parameters for temporary/ephemeral
-# DH key exchanges. See OpenSSL documentation for details
-# on how to create this file.
-# WARNING: EDH ciphers will be silently disabled if this
-# option is not set.
-#
-# sslflags= Various flags modifying the use of SSL:
-# DELAYED_AUTH
-# Don't request client certificates
-# immediately, but wait until acl processing
-# requires a certificate (not yet implemented).
-# NO_DEFAULT_CA
-# Don't use the default CA lists built in
-# to OpenSSL.
-# NO_SESSION_REUSE
-# Don't allow for session reuse. Each connection
-# will result in a new SSL session.
-# VERIFY_CRL
-# Verify CRL lists when accepting client
-# certificates.
-# VERIFY_CRL_ALL
-# Verify CRL lists for all certificates in the
-# client certificate chain.
-#
-# sslcontext= SSL session ID context identifier.
-#
-# Other Options:
-#
-# connection-auth[=on|off]
-# use connection-auth=off to tell Squid to prevent
-# forwarding Microsoft connection oriented authentication
-# (NTLM, Negotiate and Kerberos)
-#
-# disable-pmtu-discovery=
-# Control Path-MTU discovery usage:
-# off lets OS decide on what to do (default).
-# transparent disable PMTU discovery when transparent
-# support is enabled.
-# always always disable PMTU discovery.
-#
-# In many setups of transparently intercepting proxies
-# Path-MTU discovery can not work on traffic towards the
-# clients. This is the case when the intercepting device
-# does not fully track connections and fails to forward
-# ICMP 'must fragment' messages to the cache server. If you
-# have such setup and experience that certain clients
-# sporadically hang or never complete requests set
-# disable-pmtu-discovery option to 'transparent'.
-#
-# name= Specifies an internal name for the port. Defaults to
-# the port specification (port or addr:port)
-#
-# tcpkeepalive[=idle,interval,timeout]
-# Enable TCP keepalive probes of idle connections.
-# In seconds; idle is the initial time before TCP starts
-# probing the connection, interval how often to probe, and
-# timeout the time before giving up.
-#
-# If you run Squid on a dual-homed machine with an internal
-# and an external interface, we recommend you specify the
-# internal address:port in http_port. This way Squid will only be
-# visible on the internal address.
-#
-#
-
-# Squid normally listens to port 3128
-http_port 3128
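-#
-# A hedged variant only, not active (the address is a placeholder): on a
-# dual-homed host you might bind to the internal interface only, as
-# recommended above, e.g.:
-# http_port 10.0.0.1:3128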
-
-# TAG: https_port
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Usage: [ip:]port cert=certificate.pem [key=key.pem] [mode] [options...]
-#
-# The socket address where Squid will listen for client requests made
-# over TLS or SSL connections. Commonly referred to as HTTPS.
-#
-# This is most useful for situations where you are running squid in
-# accelerator mode and you want to do the SSL work at the accelerator level.
-#
-# You may specify multiple socket addresses on multiple lines,
-# each with their own SSL certificate and/or options.
-#
-# Modes:
-#
-# accel Accelerator / reverse proxy mode
-#
-# intercept Support for IP-Layer interception of
-# outgoing requests without browser settings.
-# NP: disables authentication and IPv6 on the port.
-#
-# tproxy Support Linux TPROXY for spoofing outgoing
-# connections using the client IP address.
-# NP: disables authentication and maybe IPv6 on the port.
-#
-# ssl-bump For each intercepted connection allowed by ssl_bump
-# ACLs, establish a secure connection with the client and with
-# the server, decrypt HTTPS messages as they pass through
-# Squid, and treat them as unencrypted HTTP messages,
-# becoming the man-in-the-middle.
-#
-# An "ssl_bump server-first" match is required to
-# fully enable bumping of intercepted SSL connections.
-#
-# Requires tproxy or intercept.
-#
-# Omitting the mode flag causes default forward proxy mode to be used.
-#
-#
-# See http_port for a list of generic options
-#
-#
-# SSL Options:
-#
-# cert= Path to SSL certificate (PEM format).
-#
-# key= Path to SSL private key file (PEM format)
-# if not specified, the certificate file is
-# assumed to be a combined certificate and
-# key file.
-#
-# version= The version of SSL/TLS supported
-# 1 automatic (default)
-# 2 SSLv2 only
-# 3 SSLv3 only
-# 4 TLSv1 only
-#
-# cipher= Colon separated list of supported ciphers.
-#
-# options= Various SSL engine options. The most important
-# being:
-# NO_SSLv2 Disallow the use of SSLv2
-# NO_SSLv3 Disallow the use of SSLv3
-# NO_TLSv1 Disallow the use of TLSv1
-# SINGLE_DH_USE Always create a new key when using
-# temporary/ephemeral DH key exchanges
-# See src/ssl_support.c or OpenSSL SSL_CTX_set_options
-# documentation for a complete list of options.
-#
-# clientca= File containing the list of CAs to use when
-# requesting a client certificate.
-#
-# cafile= File containing additional CA certificates to
-# use when verifying client certificates. If unset
-# clientca will be used.
-#
-# capath= Directory containing additional CA certificates
-# and CRL lists to use when verifying client certificates.
-#
-# crlfile= File of additional CRL lists to use when verifying
-# the client certificate, in addition to CRLs stored in
-# the capath. Implies VERIFY_CRL flag below.
-#
-# dhparams= File containing DH parameters for temporary/ephemeral
-# DH key exchanges.
-#
-# sslflags= Various flags modifying the use of SSL:
-# DELAYED_AUTH
-# Don't request client certificates
-# immediately, but wait until acl processing
-# requires a certificate (not yet implemented).
-# NO_DEFAULT_CA
-# Don't use the default CA lists built in
-# to OpenSSL.
-# NO_SESSION_REUSE
-# Don't allow for session reuse. Each connection
-# will result in a new SSL session.
-# VERIFY_CRL
-# Verify CRL lists when accepting client
-# certificates.
-# VERIFY_CRL_ALL
-# Verify CRL lists for all certificates in the
-# client certificate chain.
-#
-# sslcontext= SSL session ID context identifier.
-#
-# generate-host-certificates[=<on|off>]
-# Dynamically create SSL server certificates for the
-# destination hosts of bumped SSL requests. When
-# enabled, the cert and key options are used to sign
-# generated certificates. Otherwise the generated
-# certificate will be self-signed.
-# If there is a CA certificate, the lifetime of the generated
-# certificate equals the lifetime of the CA certificate. If the
-# generated certificate is self-signed, its lifetime is three
-# years.
-# This option is enabled by default when ssl-bump is used.
-# See the ssl-bump option above for more information.
-#
-# dynamic_cert_mem_cache_size=SIZE
-# Approximate total RAM size spent on cached generated
-# certificates. If set to zero, caching is disabled. The
-# default value is 4MB.
-#
-# See http_port for a list of available options.
-#Default:
-# none
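-#
-# A hedged sketch only (certificate path and site name are placeholders,
-# not part of this configuration): an accelerator listening for HTTPS
-# might look like:
-#
-# https_port 443 accel cert=/etc/squid3/example.pem defaultsite=accel.example.com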
-
-# TAG: tcp_outgoing_tos
-# Allows you to select a TOS/Diffserv value for packets outgoing
-# on the server side, based on an ACL.
-#
-# tcp_outgoing_tos ds-field [!]aclname ...
-#
-# Example where normal_service_net uses the TOS value 0x00
-# and good_service_net uses 0x20
-#
-# acl normal_service_net src 10.0.0.0/24
-# acl good_service_net src 10.0.1.0/24
-# tcp_outgoing_tos 0x00 normal_service_net
-# tcp_outgoing_tos 0x20 good_service_net
-#
-# TOS/DSCP values really only have local significance - so you should
-# know what you're specifying. For more information, see RFC2474,
-# RFC2475, and RFC3260.
-#
-# The TOS/DSCP byte must be exactly that - an octet value 0 - 255, or
-# "default" to use whatever default your host has. Note that in
-# practice often only multiples of 4 are usable as the two rightmost bits
-# have been redefined for use by ECN (RFC 3168 section 23.1).
-#
-# Processing proceeds in the order specified, and stops at first fully
-# matching line.
-#Default:
-# none
-
-# TAG: clientside_tos
-# Allows you to select a TOS/Diffserv value for packets being transmitted
-# on the client-side, based on an ACL.
-#
-# clientside_tos ds-field [!]aclname ...
-#
-# Example where normal_service_net uses the TOS value 0x00
-# and good_service_net uses 0x20
-#
-# acl normal_service_net src 10.0.0.0/24
-# acl good_service_net src 10.0.1.0/24
-# clientside_tos 0x00 normal_service_net
-# clientside_tos 0x20 good_service_net
-#
-# Note: This feature is incompatible with qos_flows. Any TOS values set here
-# will be overwritten by TOS values in qos_flows.
-#Default:
-# none
-
-# TAG: tcp_outgoing_mark
-# Note: This option is only available if Squid is rebuilt with the
-# Packet MARK (Linux)
-#
-# Allows you to apply a Netfilter mark value to outgoing packets
-# on the server side, based on an ACL.
-#
-# tcp_outgoing_mark mark-value [!]aclname ...
-#
-# Example where normal_service_net uses the mark value 0x00
-# and good_service_net uses 0x20
-#
-# acl normal_service_net src 10.0.0.0/24
-# acl good_service_net src 10.0.1.0/24
-# tcp_outgoing_mark 0x00 normal_service_net
-# tcp_outgoing_mark 0x20 good_service_net
-#Default:
-# none
-
-# TAG: clientside_mark
-# Note: This option is only available if Squid is rebuilt with the
-# Packet MARK (Linux)
-#
-# Allows you to apply a Netfilter mark value to packets being transmitted
-# on the client-side, based on an ACL.
-#
-# clientside_mark mark-value [!]aclname ...
-#
-# Example where normal_service_net uses the mark value 0x00
-# and good_service_net uses 0x20
-#
-# acl normal_service_net src 10.0.0.0/24
-# acl good_service_net src 10.0.1.0/24
-# clientside_mark 0x00 normal_service_net
-# clientside_mark 0x20 good_service_net
-#
-# Note: This feature is incompatible with qos_flows. Any mark values set here
-# will be overwritten by mark values in qos_flows.
-#Default:
-# none
-
-# TAG: qos_flows
-# Allows you to select a TOS/DSCP value to mark outgoing
-# connections with, based on where the reply was sourced. For
-# platforms using netfilter, allows you to set a netfilter mark
-# value instead of, or in addition to, a TOS value.
-#
-# TOS values really only have local significance - so you should
-# know what you're specifying. For more information, see RFC2474,
-# RFC2475, and RFC3260.
-#
-# The TOS/DSCP byte must be exactly that - an octet value 0 - 255. Note that
-# in practice often only multiples of 4 are usable as the two rightmost bits
-# have been redefined for use by ECN (RFC 3168 section 23.1).
-#
-# Mark values can be any unsigned 32-bit integer value.
-#
-# This setting is configured by setting the following values:
-#
-# tos|mark Whether to set TOS or netfilter mark values
-#
-# local-hit=0xFF Value to mark local cache hits.
-#
-# sibling-hit=0xFF Value to mark hits from sibling peers.
-#
-# parent-hit=0xFF Value to mark hits from parent peers.
-#
-# miss=0xFF[/mask] Value to mark cache misses. Takes precedence
-# over the preserve-miss feature (see below), unless
-# mask is specified, in which case only the bits
-# specified in the mask are written.
-#
-# The TOS variant of the following features are only possible on Linux
-# and require your kernel to be patched with the TOS preserving ZPH
-# patch, available from http://zph.bratcheda.org
-# No patch is needed to preserve the netfilter mark, which will work
-# with all variants of netfilter.
-#
-# disable-preserve-miss
-# This option disables the preservation of the TOS or netfilter
-# mark. By default, the existing TOS or netfilter mark value of
-# the response coming from the remote server will be retained
-# and masked with miss-mark.
-# NOTE: in the case of a netfilter mark, the mark must be set on
-# the connection (using the CONNMARK target) not on the packet
-# (MARK target).
-#
-# miss-mask=0xFF
-# Allows you to mask certain bits in the TOS or mark value
-# received from the remote server, before copying the value to
-# the TOS sent towards clients.
-# Default for tos: 0xFF (TOS from server is not changed).
-# Default for mark: 0xFFFFFFFF (mark from server is not changed).
-#
-# All of these features require the --enable-zph-qos compilation flag
-# (enabled by default). Netfilter marking also requires the
-# libnetfilter_conntrack libraries (--with-netfilter-conntrack) and
-# libcap 2.09+ (--with-libcap).
-#
-#Default:
-# none
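-#
-# A hedged illustration only (the values are placeholders): mark local
-# cache hits and misses with distinct TOS values, per the settings above:
-#
-# qos_flows tos local-hit=0x30 miss=0x00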
-
-# TAG: tcp_outgoing_address
-# Allows you to map requests to different outgoing IP addresses
-# based on the username or source address of the user making
-# the request.
-#
-# tcp_outgoing_address ipaddr [[!]aclname] ...
-#
-# For example;
-# Forwarding clients with dedicated IPs for certain subnets.
-#
-# acl normal_service_net src 10.0.0.0/24
-# acl good_service_net src 10.0.2.0/24
-#
-# tcp_outgoing_address 2001:db8::c001 good_service_net
-# tcp_outgoing_address 10.1.0.2 good_service_net
-#
-# tcp_outgoing_address 2001:db8::beef normal_service_net
-# tcp_outgoing_address 10.1.0.1 normal_service_net
-#
-# tcp_outgoing_address 2001:db8::1
-# tcp_outgoing_address 10.1.0.3
-#
-# Processing proceeds in the order specified, and stops at first fully
-# matching line.
-#
-# Squid will add an implicit IP version test to each line.
-# Requests going to IPv4 websites will use the outgoing 10.1.0.* addresses.
-# Requests going to IPv6 websites will use the outgoing 2001:db8:* addresses.
-#
-#
-# NOTE: The use of this directive using client dependent ACLs is
-# incompatible with the use of server side persistent connections. To
-# ensure correct results it is best to set server_persistent_connections
-# to off when using this directive in such configurations.
-#
-# NOTE: The use of this directive to set a local IP on outgoing TCP links
-# is incompatible with using TPROXY to set the client IP on outbound TCP links.
-# When needing to contact peers, use the no-tproxy cache_peer option and the
-# client_dst_passthru directive to re-enable normal forwarding such as this.
-#
-#Default:
-# Address selection is performed by the operating system.
-
-# TAG: host_verify_strict
-# Regardless of this option setting, when dealing with intercepted
-# traffic, Squid always verifies that the destination IP address matches
-# the Host header domain or IP (called 'authority form URL').
-#
-# This enforcement is performed to satisfy a MUST-level requirement in
-# RFC 2616 section 14.23: "The Host field value MUST represent the naming
-# authority of the origin server or gateway given by the original URL".
-#
-# When set to ON:
-# Squid always responds with an HTTP 409 (Conflict) error
-# page and logs a security warning if there is no match.
-#
-# Squid verifies that the destination IP address matches
-# the Host header for forward-proxy and reverse-proxy traffic
-# as well. For those traffic types, Squid also enables the
-# following checks, comparing the corresponding Host header
-# and Request-URI components:
-#
-# * The host names (domain or IP) must be identical,
-# but a valueless or missing Host header disables all checks.
-# For the two host names to match, both must be either IP
-# or FQDN.
-#
-# * Port numbers must be identical, but if a port is missing
-# the scheme-default port is assumed.
-#
-#
-# When set to OFF (the default):
-# Squid allows suspicious requests to continue but logs a
-# security warning and blocks caching of the response.
-#
-# * Forward-proxy traffic is not checked at all.
-#
-# * Reverse-proxy traffic is not checked at all.
-#
-# * Intercepted traffic which passes verification is handled
-# according to client_dst_passthru.
-#
-# * Intercepted requests which fail verification are sent
-# to the client's original destination instead of DIRECT.
-# This overrides 'client_dst_passthru off'.
-#
-# For now suspicious intercepted CONNECT requests are always
-# responded to with an HTTP 409 (Conflict) error page.
-#
-#
-# SECURITY NOTE:
-#
-# As described in CVE-2009-0801 when the Host: header alone is used
-# to determine the destination of a request it becomes trivial for
-# malicious scripts on remote websites to bypass browser same-origin
-# security policy and sandboxing protections.
-#
-# The cause of this is that such applets are allowed to implement their
-# own HTTP stack, in which case the same-origin policy of the browser
-# sandbox only verifies that the applet tries to contact the same IP
-# as from where it was loaded at the IP level. The Host: header may
-# be different from the connected IP and approved origin.
-#
-#Default:
-# host_verify_strict off
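-#
-# A hedged example only: enabling the strict enforcement described above
-# would simply be:
-#
-# host_verify_strict on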
-
-# TAG: client_dst_passthru
-# With NAT or TPROXY intercepted traffic Squid may pass the request
-# directly to the original client destination IP or seek a faster
-# source using the HTTP Host header.
-#
-# Using Host to locate alternative servers can provide faster
-# connectivity with a range of failure recovery options.
-# But can also lead to connectivity trouble when the client and
-# server are attempting stateful interactions unaware of the proxy.
-#
-# This option (on by default) prevents alternative DNS entries from being
-# located to send intercepted traffic DIRECT to an origin server.
-# The client's original destination IP and port will be used instead.
-#
-# Regardless of this option setting, when dealing with intercepted
-# traffic Squid will verify the Host: header and any traffic which
-# fails Host verification will be treated as if this option were ON.
-#
-# see host_verify_strict for details on the verification process.
-#Default:
-# client_dst_passthru on
-
-# SSL OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: ssl_unclean_shutdown
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Some browsers (especially MSIE) bug out on SSL shutdown
-# messages.
-#Default:
-# ssl_unclean_shutdown off
-
-# TAG: ssl_engine
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# The OpenSSL engine to use. You will need to set this if you
-# would like to use hardware SSL acceleration for example.
-#Default:
-# none
-
-# TAG: sslproxy_client_certificate
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Client SSL Certificate to use when proxying https:// URLs
-#Default:
-# none
-
-# TAG: sslproxy_client_key
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Client SSL Key to use when proxying https:// URLs
-#Default:
-# none
-
-# TAG: sslproxy_version
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# SSL version level to use when proxying https:// URLs
-#
-# The versions of SSL/TLS supported:
-#
-# 1 automatic (default)
-# 2 SSLv2 only
-# 3 SSLv3 only
-# 4 TLSv1.0 only
-# 5 TLSv1.1 only
-# 6 TLSv1.2 only
-#Default:
-# automatic SSL/TLS version negotiation
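-#
-# A hedged example only: restricting proxied HTTPS to TLSv1.2, per the
-# table above, would be:
-#
-# sslproxy_version 6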
-
-# TAG: sslproxy_options
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# SSL implementation options to use when proxying https:// URLs
-#
-# The most important being:
-#
-# NO_SSLv2 Disallow the use of SSLv2
-# NO_SSLv3 Disallow the use of SSLv3
-# NO_TLSv1 Disallow the use of TLSv1.0
-# NO_TLSv1_1 Disallow the use of TLSv1.1
-# NO_TLSv1_2 Disallow the use of TLSv1.2
-# SINGLE_DH_USE
-# Always create a new key when using temporary/ephemeral
-# DH key exchanges
-# SSL_OP_NO_TICKET
-# Disable use of RFC5077 session tickets. Some servers
-# may have problems understanding the TLS extension due
-# to ambiguous specification in RFC4507.
-# ALL Enable various bug workarounds suggested as "harmless"
-# by OpenSSL. Be warned that this may reduce SSL/TLS
-# strength to some attacks.
-#
-# See the OpenSSL SSL_CTX_set_options documentation for a
-# complete list of possible options.
-#Default:
-# none
-
-# TAG: sslproxy_cipher
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# SSL cipher list to use when proxying https:// URLs
-#
-# Colon separated list of supported ciphers.
-#Default:
-# none
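-#
-# A hedged example only (the cipher string is an illustrative OpenSSL
-# cipher list, not a recommendation):
-#
-# sslproxy_cipher HIGH:!aNULL:!MD5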
-
-# TAG: sslproxy_cafile
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# file containing CA certificates to use when verifying server
-# certificates while proxying https:// URLs
-#Default:
-# none
-
-# TAG: sslproxy_capath
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# directory containing CA certificates to use when verifying
-# server certificates while proxying https:// URLs
-#Default:
-# none
-
-# TAG: ssl_bump
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# This option is consulted when a CONNECT request is received on
-# an http_port (or a new connection is intercepted at an
-# https_port), provided that port was configured with an ssl-bump
-# flag. The subsequent data on the connection is either treated as
-# HTTPS and decrypted OR tunneled at TCP level without decryption,
-# depending on the first bumping "mode" which ACLs match.
-#
-# ssl_bump <mode> [!]acl ...
-#
-# The following bumping modes are supported:
-#
-# client-first
-# Allow bumping of the connection. Establish a secure connection
-# with the client first, then connect to the server. This old mode
-# does not allow Squid to mimic server SSL certificate and does
-# not work with intercepted SSL connections.
-#
-# server-first
-# Allow bumping of the connection. Establish a secure connection
-# with the server first, then establish a secure connection with
-# the client, using a mimicked server certificate. Works with both
-# CONNECT requests and intercepted SSL connections.
-#
-# none
-# Become a TCP tunnel without decoding the connection.
-# Works with both CONNECT requests and intercepted SSL
-# connections. This is the default behavior when no
-# ssl_bump option is given or no ssl_bump ACLs match.
-#
-# By default, no connections are bumped.
-#
-# The first matching ssl_bump option wins. If no ACLs match, the
-# connection is not bumped. Unlike most allow/deny ACL lists, ssl_bump
-# does not have an implicit "negate the last given option" rule. You
-# must make that rule explicit if you convert old ssl_bump allow/deny
-# rules that rely on such an implicit rule.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-# See also: http_port ssl-bump, https_port ssl-bump
-#
-#
-# # Example: Bump all requests except those originating from
-# # localhost or those going to example.com.
-#
-# acl broken_sites dstdomain .example.com
-# ssl_bump none localhost
-# ssl_bump none broken_sites
-# ssl_bump server-first all
-#Default:
-# Does not bump unless rules are present in squid.conf
-
-# TAG: sslproxy_flags
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Various flags modifying the use of SSL while proxying https:// URLs:
-# DONT_VERIFY_PEER Accept certificates that fail verification.
-# For refined control, see sslproxy_cert_error.
-# NO_DEFAULT_CA Don't use the default CA list built in
-# to OpenSSL.
-#Default:
-# none
-
-# TAG: sslproxy_cert_error
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Use this ACL to bypass server certificate validation errors.
-#
-# For example, the following lines will bypass all validation errors
-# when talking to servers for example.com. All other
-# validation errors will result in ERR_SECURE_CONNECT_FAIL error.
-#
-# acl BrokenButTrustedServers dstdomain example.com
-# sslproxy_cert_error allow BrokenButTrustedServers
-# sslproxy_cert_error deny all
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-# Using slow acl types may result in server crashes
-#
-# Without this option, all server certificate validation errors
-# terminate the transaction to protect Squid and the client.
-#
-# SECURITY WARNING:
-# Bypassing validation errors is dangerous because an
-# error usually implies that the server cannot be trusted
-# and the connection may be insecure.
-#
-# See also: sslproxy_flags and DONT_VERIFY_PEER.
-#Default:
-# Server certificate errors terminate the transaction.
-
-# TAG: sslproxy_cert_sign
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-#
-# sslproxy_cert_sign <signing algorithm> acl ...
-#
-# The following certificate signing algorithms are supported:
-#
-# signTrusted
-# Sign using the configured CA certificate which is usually
-# placed in and trusted by end-user browsers. This is the
-# default for trusted origin server certificates.
-#
-# signUntrusted
-# Sign to guarantee an X509_V_ERR_CERT_UNTRUSTED browser error.
-# This is the default for untrusted origin server certificates
-# that are not self-signed (see ssl::certUntrusted).
-#
-# signSelf
-# Sign using a self-signed certificate with the right CN to
-# generate a X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT error in the
-# browser. This is the default for self-signed origin server
-# certificates (see ssl::certSelfSigned).
-#
-# This clause only supports fast acl types.
-#
-# When sslproxy_cert_sign acl(s) match, Squid uses the corresponding
-# signing algorithm to generate the certificate and ignores all
-# subsequent sslproxy_cert_sign options (the first match wins). If no
-# acl(s) match, the default signing algorithm is determined by errors
-# detected when obtaining and validating the origin server certificate.
-#
-# WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl:certDomainMismatch can
-# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a
-# CONNECT request that carries a domain name. In all other cases (CONNECT
-# to an IP address or an intercepted SSL connection), Squid cannot detect
-# the domain mismatch at certificate generation time when
-# bump-server-first is used.
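-#
-# Example (a sketch only; the acl name and domain are placeholders):
-#
-# acl trusted_internal dstdomain .internal.example.com
-# sslproxy_cert_sign signTrusted trusted_internal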
-#Default:
-# none
-
-# TAG: sslproxy_cert_adapt
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-#
-# sslproxy_cert_adapt <adaptation algorithm> acl ...
-#
-# The following certificate adaptation algorithms are supported:
-#
-# setValidAfter
-# Sets the "Not After" property to the "Not After" property of
-# the CA certificate used to sign generated certificates.
-#
-# setValidBefore
-# Sets the "Not Before" property to the "Not Before" property of
-# the CA certificate used to sign generated certificates.
-#
-# setCommonName or setCommonName{CN}
-# Sets Subject.CN property to the host name specified as a
-# CN parameter or, if no explicit CN parameter was specified,
-# extracted from the CONNECT request. It is a misconfiguration
-# to use setCommonName without an explicit parameter for
-# intercepted or tproxied SSL connections.
-#
-# This clause only supports fast acl types.
-#
-# Squid first groups sslproxy_cert_adapt options by adaptation algorithm.
-# Within a group, when sslproxy_cert_adapt acl(s) match, Squid uses the
-# corresponding adaptation algorithm to generate the certificate and
-# ignores all subsequent sslproxy_cert_adapt options in that algorithm's
-# group (i.e., the first match wins within each algorithm group). If no
-# acl(s) match, the default mimicking action takes place.
-#
-# WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl:certDomainMismatch can
-# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a
-# CONNECT request that carries a domain name. In all other cases (CONNECT
-# to an IP address or an intercepted SSL connection), Squid cannot detect
-# the domain mismatch at certificate generation time when
-# bump-server-first is used.
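-#
-# Example (a sketch only; the acl name and domain are placeholders;
-# setCommonName here takes the CN from the CONNECT request):
-#
-# acl bumped_connects dstdomain .example.net
-# sslproxy_cert_adapt setCommonName bumped_connects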
-#Default:
-# none
-
-# TAG: sslpassword_program
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl
-#
-# Specify a program used for entering SSL key passphrases
-# when using encrypted SSL certificate keys. If not specified
-# keys must either be unencrypted, or Squid started with the -N
-# option to allow it to query interactively for the passphrase.
-#
-# The key file name is given as argument to the program allowing
-# selection of the right password if you have multiple encrypted
-# keys.
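-#
-# Example (the helper path is hypothetical; any executable that
-# prints the passphrase for the key file given as its argument works):
-#
-# sslpassword_program /usr/local/bin/squid-ssl-passwd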
-#Default:
-# none
-
-# OPTIONS RELATING TO EXTERNAL SSL_CRTD
-# -----------------------------------------------------------------------------
-
-# TAG: sslcrtd_program
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl-crtd
-#
-# Specify the location and options of the executable for ssl_crtd process.
-# /usr/lib/squid3/ssl_crtd program requires -s and -M parameters
-# For more information use:
-# /usr/lib/squid3/ssl_crtd -h
-#Default:
-# sslcrtd_program /usr/lib/squid3/ssl_crtd -s /var/lib/ssl_db -M 4MB
-
-# TAG: sslcrtd_children
-# Note: This option is only available if Squid is rebuilt with the
-# --enable-ssl-crtd
-#
-#	The maximum number of processes spawned to service SSL servers.
-# The maximum this may be safely set to is 32.
-#
-# The startup= and idle= options allow some measure of skew in your
-# tuning.
-#
-# startup=N
-#
-# Sets the minimum number of processes to spawn when Squid
-# starts or reconfigures. When set to zero the first request will
-# cause spawning of the first child process to handle it.
-#
-#	Starting too few children temporarily slows Squid under load while it
-# tries to spawn enough additional processes to cope with traffic.
-#
-# idle=N
-#
-#	Sets the minimum number of processes Squid should try to keep available
-# at all times. When traffic begins to rise above what the existing
-# processes can handle this many more will be spawned up to the maximum
-# configured. A minimum setting of 1 is required.
-#
-# You must have at least one ssl_crtd process.
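-#
-# Example (values are illustrative only):
-#
-# sslcrtd_children 16 startup=2 idle=1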
-#Default:
-# sslcrtd_children 32 startup=5 idle=1
-
-# OPTIONS WHICH AFFECT THE NEIGHBOR SELECTION ALGORITHM
-# -----------------------------------------------------------------------------
-
-# TAG: cache_peer
-# To specify other caches in a hierarchy, use the format:
-#
-# cache_peer hostname type http-port icp-port [options]
-#
-# For example,
-#
-# # proxy icp
-# # hostname type port port options
-# # -------------------- -------- ----- ----- -----------
-# cache_peer parent.foo.net parent 3128 3130 default
-# cache_peer sib1.foo.net sibling 3128 3130 proxy-only
-# cache_peer sib2.foo.net sibling 3128 3130 proxy-only
-# cache_peer example.com parent 80 0 default
-# cache_peer cdn.example.com sibling 3128 0
-#
-# type: either 'parent', 'sibling', or 'multicast'.
-#
-#	proxy-port: 	The port number where the peer accepts HTTP requests.
-# For other Squid proxies this is usually 3128
-# For web servers this is usually 80
-#
-# icp-port: Used for querying neighbor caches about objects.
-# Set to 0 if the peer does not support ICP or HTCP.
-# See ICP and HTCP options below for additional details.
-#
-#
-# ==== ICP OPTIONS ====
-#
-# You MUST also set icp_port and icp_access explicitly when using these options.
-# The defaults will prevent peer traffic using ICP.
-#
-#
-# no-query Disable ICP queries to this neighbor.
-#
-# multicast-responder
-# Indicates the named peer is a member of a multicast group.
-# ICP queries will not be sent directly to the peer, but ICP
-# replies will be accepted from it.
-#
-# closest-only Indicates that, for ICP_OP_MISS replies, we'll only forward
-# CLOSEST_PARENT_MISSes and never FIRST_PARENT_MISSes.
-#
-# background-ping
-# To only send ICP queries to this neighbor infrequently.
-# This is used to keep the neighbor round trip time updated
-# and is usually used in conjunction with weighted-round-robin.
-#
-#
-# ==== HTCP OPTIONS ====
-#
-# You MUST also set htcp_port and htcp_access explicitly when using these options.
-# The defaults will prevent peer traffic using HTCP.
-#
-#
-# htcp Send HTCP, instead of ICP, queries to the neighbor.
-# You probably also want to set the "icp-port" to 4827
-# instead of 3130. This directive accepts a comma separated
-# list of options described below.
-#
-# htcp=oldsquid Send HTCP to old Squid versions (2.5 or earlier).
-#
-# htcp=no-clr Send HTCP to the neighbor but without
-# sending any CLR requests. This cannot be used with
-# only-clr.
-#
-# htcp=only-clr Send HTCP to the neighbor but ONLY CLR requests.
-# This cannot be used with no-clr.
-#
-# htcp=no-purge-clr
-# Send HTCP to the neighbor including CLRs but only when
-# they do not result from PURGE requests.
-#
-# htcp=forward-clr
-# Forward any HTCP CLR requests this proxy receives to the peer.
-#
-#
-# ==== PEER SELECTION METHODS ====
-#
-# The default peer selection method is ICP, with the first responding peer
-# being used as source. These options can be used for better load balancing.
-#
-#
-# default This is a parent cache which can be used as a "last-resort"
-# if a peer cannot be located by any of the peer-selection methods.
-# If specified more than once, only the first is used.
-#
-# round-robin Load-Balance parents which should be used in a round-robin
-# fashion in the absence of any ICP queries.
-# weight=N can be used to add bias.
-#
-# weighted-round-robin
-# Load-Balance parents which should be used in a round-robin
-# fashion with the frequency of each parent being based on the
-# round trip time. Closer parents are used more often.
-# Usually used for background-ping parents.
-# weight=N can be used to add bias.
-#
-# carp Load-Balance parents which should be used as a CARP array.
-#	Requests are distributed among the parents using the CARP
-#	load balancing hash function, weighted by each parent's weight.
-#
-# userhash Load-balance parents based on the client proxy_auth or ident username.
-#
-# sourcehash Load-balance parents based on the client source IP.
-#
-# multicast-siblings
-# To be used only for cache peers of type "multicast".
-# ALL members of this multicast group have "sibling"
-#	relationship with it, not "parent".  This is for a multicast
-#	group where the requested object would be fetched only from
-#	a "parent" cache anyway.  It is useful, e.g., when
-#	configuring a pool of redundant Squid proxies that are
-#	members of the same multicast group.
-#
-#
-# ==== PEER SELECTION OPTIONS ====
-#
-# weight=N use to affect the selection of a peer during any weighted
-# peer-selection mechanisms.
-# The weight must be an integer; default is 1,
-# larger weights are favored more.
-# This option does not affect parent selection if a peering
-# protocol is not in use.
-#
-# basetime=N Specify a base amount to be subtracted from round trip
-# times of parents.
-# It is subtracted before division by weight in calculating
-#	which parent to fetch from.  If the rtt is less than the
-# base time the rtt is set to a minimal value.
-#
-# ttl=N Specify a TTL to use when sending multicast ICP queries
-# to this address.
-# Only useful when sending to a multicast group.
-# Because we don't accept ICP replies from random
-# hosts, you must configure other group members as
-# peers with the 'multicast-responder' option.
-#
-# no-delay To prevent access to this neighbor from influencing the
-# delay pools.
-#
-# digest-url=URL Tell Squid to fetch the cache digest (if digests are
-# enabled) for this host from the specified URL rather
-# than the Squid default location.
-#
-#
-# ==== CARP OPTIONS ====
-#
-# carp-key=key-specification
-#	Use a different key than the full URL to hash against the peer.
-#	The key-specification is a comma-separated list of the keywords
-#	scheme, host, port, path, params.
-# Order is not important.
-#
-# ==== ACCELERATOR / REVERSE-PROXY OPTIONS ====
-#
-# originserver Causes this parent to be contacted as an origin server.
-# Meant to be used in accelerator setups when the peer
-# is a web server.
-#
-# forceddomain=name
-# Set the Host header of requests forwarded to this peer.
-# Useful in accelerator setups where the server (peer)
-# expects a certain domain name but clients may request
-#	others, e.g. example.com or www.example.com.
-#
-# no-digest Disable request of cache digests.
-#
-# no-netdb-exchange
-# Disables requesting ICMP RTT database (NetDB).
-#
-#
-# ==== AUTHENTICATION OPTIONS ====
-#
-# login=user:password
-# If this is a personal/workgroup proxy and your parent
-# requires proxy authentication.
-#
-# Note: The string can include URL escapes (i.e. %20 for
-# spaces). This also means % must be written as %%.
-#
-# login=PASSTHRU
-# Send login details received from client to this peer.
-# Both Proxy- and WWW-Authorization headers are passed
-# without alteration to the peer.
-# Authentication is not required by Squid for this to work.
-#
-# Note: This will pass any form of authentication but
-# only Basic auth will work through a proxy unless the
-# connection-auth options are also used.
-#
-# login=PASS Send login details received from client to this peer.
-# Authentication is not required by this option.
-#
-# If there are no client-provided authentication headers
-# to pass on, but username and password are available
-# from an external ACL user= and password= result tags
-# they may be sent instead.
-#
-# Note: To combine this with proxy_auth both proxies must
-# share the same user database as HTTP only allows for
-# a single login (one for proxy, one for origin server).
-#	Also be warned this will expose your users' proxy
-#	passwords to the peer. USE WITH CAUTION
-#
-# login=*:password
-# Send the username to the upstream cache, but with a
-# fixed password. This is meant to be used when the peer
-# is in another administrative domain, but it is still
-# needed to identify each user.
-# The star can optionally be followed by some extra
-# information which is added to the username. This can
-# be used to identify this proxy to the peer, similar to
-# the login=username:password option above.
-#
-# login=NEGOTIATE
-# If this is a personal/workgroup proxy and your parent
-# requires a secure proxy authentication.
-# The first principal from the default keytab or defined by
-# the environment variable KRB5_KTNAME will be used.
-#
-# WARNING: The connection may transmit requests from multiple
-# clients. Negotiate often assumes end-to-end authentication
-#	and a single client, which is not strictly true here.
-#
-# login=NEGOTIATE:principal_name
-# If this is a personal/workgroup proxy and your parent
-# requires a secure proxy authentication.
-# The principal principal_name from the default keytab or
-# defined by the environment variable KRB5_KTNAME will be
-# used.
-#
-# WARNING: The connection may transmit requests from multiple
-# clients. Negotiate often assumes end-to-end authentication
-#	and a single client, which is not strictly true here.
-#
-# connection-auth=on|off
-#	Tell Squid that this peer does or does not support Microsoft
-# connection oriented authentication, and any such
-# challenges received from there should be ignored.
-# Default is auto to automatically determine the status
-# of the peer.
-#
-#
-# ==== SSL / HTTPS / TLS OPTIONS ====
-#
-# ssl Encrypt connections to this peer with SSL/TLS.
-#
-# sslcert=/path/to/ssl/certificate
-# A client SSL certificate to use when connecting to
-# this peer.
-#
-# sslkey=/path/to/ssl/key
-# The private SSL key corresponding to sslcert above.
-# If 'sslkey' is not specified 'sslcert' is assumed to
-# reference a combined file containing both the
-# certificate and the key.
-#
-# Notes:
-#
-# On Debian/Ubuntu systems a default snakeoil certificate is
-#	available in /etc/ssl and users can set:
-#
-# cert=/etc/ssl/certs/ssl-cert-snakeoil.pem
-#
-# and
-#
-# key=/etc/ssl/private/ssl-cert-snakeoil.key
-#
-# for testing.
-#
-# sslversion=1|2|3|4|5|6
-# The SSL version to use when connecting to this peer
-# 1 = automatic (default)
-# 2 = SSL v2 only
-# 3 = SSL v3 only
-# 4 = TLS v1.0 only
-# 5 = TLS v1.1 only
-# 6 = TLS v1.2 only
-#
-# sslcipher=... The list of valid SSL ciphers to use when connecting
-# to this peer.
-#
-# ssloptions=... Specify various SSL implementation options:
-#
-# NO_SSLv2 Disallow the use of SSLv2
-# NO_SSLv3 Disallow the use of SSLv3
-# NO_TLSv1 Disallow the use of TLSv1.0
-# NO_TLSv1_1 Disallow the use of TLSv1.1
-# NO_TLSv1_2 Disallow the use of TLSv1.2
-# SINGLE_DH_USE
-# Always create a new key when using
-# temporary/ephemeral DH key exchanges
-# ALL Enable various bug workarounds
-# suggested as "harmless" by OpenSSL
-# Be warned that this reduces SSL/TLS
-# strength to some attacks.
-#
-# See the OpenSSL SSL_CTX_set_options documentation for a
-# more complete list.
-#
-# sslcafile=... A file containing additional CA certificates to use
-# when verifying the peer certificate.
-#
-# sslcapath=... A directory containing additional CA certificates to
-# use when verifying the peer certificate.
-#
-# sslcrlfile=... A certificate revocation list file to use when
-# verifying the peer certificate.
-#
-# sslflags=... Specify various flags modifying the SSL implementation:
-#
-# DONT_VERIFY_PEER
-# Accept certificates even if they fail to
-# verify.
-# NO_DEFAULT_CA
-# Don't use the default CA list built in
-# to OpenSSL.
-# DONT_VERIFY_DOMAIN
-# Don't verify the peer certificate
-# matches the server name
-#
-#	ssldomain= 	The peer name as advertised in its certificate.
-# Used for verifying the correctness of the received peer
-# certificate. If not specified the peer hostname will be
-# used.
-#
-# front-end-https
-# Enable the "Front-End-Https: On" header needed when
-#			using Squid as an SSL frontend in front of Microsoft OWA.
-# See MS KB document Q307347 for details on this header.
-# If set to auto the header will only be added if the
-# request is forwarded as a https:// URL.
-#
-#
-# ==== GENERAL OPTIONS ====
-#
-# connect-timeout=N
-# A peer-specific connect timeout.
-# Also see the peer_connect_timeout directive.
-#
-# connect-fail-limit=N
-# How many times connecting to a peer must fail before
-# it is marked as down. Default is 10.
-#
-# allow-miss Disable Squid's use of only-if-cached when forwarding
-# requests to siblings. This is primarily useful when
-#			icp_hit_stale is used by the sibling. Too extensive use
-#			of this option may result in forwarding loops, so you
-#			should avoid having two-way peerings with this option;
-#			for example, deny peer usage on requests coming from a
-#			peer by denying cache_peer_access when the source is a peer.
-#
-#	max-conn=N 	Limit the number of connections Squid may open to this
-#			peer.
-#
-# name=xxx Unique name for the peer.
-# Required if you have multiple peers on the same host
-# but different ports.
-# This name can be used in cache_peer_access and similar
-#			directives to identify the peer.
-# Can be used by outgoing access controls through the
-# peername ACL type.
-#
-# no-tproxy Do not use the client-spoof TPROXY support when forwarding
-# requests to this peer. Use normal address selection instead.
-#
-# proxy-only objects fetched from the peer will not be stored locally.
-#
-#Default:
-# none
-
-# TAG: cache_peer_domain
-# Use to limit the domains for which a neighbor cache will be
-# queried.
-#
-# Usage:
-# cache_peer_domain cache-host domain [domain ...]
-# cache_peer_domain cache-host !domain
-#
-# For example, specifying
-#
-# cache_peer_domain parent.foo.net .edu
-#
-#	has the effect that UDP query packets are sent to
-#	'parent.foo.net' only when the requested object exists on a
-#	server in the .edu domain.  Prefixing the domain name
-# with '!' means the cache will be queried for objects
-# NOT in that domain.
-#
-# NOTE: * Any number of domains may be given for a cache-host,
-# either on the same or separate lines.
-# * When multiple domains are given for a particular
-# cache-host, the first matched domain is applied.
-# * Cache hosts with no domain restrictions are queried
-# for all requests.
-# * There are no defaults.
-# * There is also a 'cache_peer_access' tag in the ACL
-# section.
-#Default:
-# none
-
-# TAG: cache_peer_access
-# Similar to 'cache_peer_domain' but provides more flexibility by
-# using ACL elements.
-#
-# Usage:
-# cache_peer_access cache-host allow|deny [!]aclname ...
-#
-# The syntax is identical to 'http_access' and the other lists of
-# ACL elements. See the comments for 'http_access' below, or
-# the Squid FAQ (http://wiki.squid-cache.org/SquidFaq/SquidAcl).
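-#
-# Example (a sketch; the acl name is a placeholder):
-#
-# acl intranet dstdomain .corp.example.com
-# cache_peer_access parent.foo.net deny intranet
-# cache_peer_access parent.foo.net allow all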
-#Default:
-# none
-
-# TAG: neighbor_type_domain
-# Modify the cache_peer neighbor type when passing requests
-# about specific domains to the peer.
-#
-# Usage:
-# neighbor_type_domain neighbor parent|sibling domain domain ...
-#
-# For example:
-# cache_peer foo.example.com parent 3128 3130
-# neighbor_type_domain foo.example.com sibling .au .de
-#
-# The above configuration treats all requests to foo.example.com as a
-# parent proxy unless the request is for a .au or .de ccTLD domain name.
-#Default:
-# The peer type from cache_peer directive is used for all requests to that peer.
-
-# TAG: dead_peer_timeout (seconds)
-# This controls how long Squid waits to declare a peer cache
-# as "dead." If there are no ICP replies received in this
-# amount of time, Squid will declare the peer dead and not
-# expect to receive any further ICP replies. However, it
-# continues to send ICP queries, and will mark the peer as
-# alive upon receipt of the first subsequent ICP reply.
-#
-# This timeout also affects when Squid expects to receive ICP
-# replies from peers. If more than 'dead_peer' seconds have
-# passed since the last ICP reply was received, Squid will not
-# expect to receive an ICP reply on the next query. Thus, if
-# your time between requests is greater than this timeout, you
-# will see a lot of requests sent DIRECT to origin servers
-# instead of to your parents.
-#Default:
-# dead_peer_timeout 10 seconds
-
-# TAG: forward_max_tries
-# Controls how many different forward paths Squid will try
-# before giving up. See also forward_timeout.
-#
-# NOTE: connect_retries (default: none) can make each of these
-# possible forwarding paths be tried multiple times.
-#Default:
-# forward_max_tries 10
-
-# TAG: hierarchy_stoplist
-# A list of words which, if found in a URL, cause the object to
-# be handled directly by this cache. In other words, use this
-# to not query neighbor caches for certain objects. You may
-# list this option multiple times.
-#
-# Example:
-# hierarchy_stoplist cgi-bin ?
-#
-# Note: never_direct overrides this option.
-#Default:
-# none
-
-# MEMORY CACHE OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: cache_mem (bytes)
-# NOTE: THIS PARAMETER DOES NOT SPECIFY THE MAXIMUM PROCESS SIZE.
-# IT ONLY PLACES A LIMIT ON HOW MUCH ADDITIONAL MEMORY SQUID WILL
-# USE AS A MEMORY CACHE OF OBJECTS. SQUID USES MEMORY FOR OTHER
-# THINGS AS WELL. SEE THE SQUID FAQ SECTION 8 FOR DETAILS.
-#
-# 'cache_mem' specifies the ideal amount of memory to be used
-# for:
-# * In-Transit objects
-# * Hot Objects
-# * Negative-Cached objects
-#
-# Data for these objects are stored in 4 KB blocks. This
-# parameter specifies the ideal upper limit on the total size of
-# 4 KB blocks allocated. In-Transit objects take the highest
-# priority.
-#
-# In-transit objects have priority over the others. When
-# additional space is needed for incoming data, negative-cached
-# and hot objects will be released. In other words, the
-# negative-cached and hot objects will fill up any unused space
-# not needed for in-transit objects.
-#
-# If circumstances require, this limit will be exceeded.
-# Specifically, if your incoming request rate requires more than
-# 'cache_mem' of memory to hold in-transit objects, Squid will
-# exceed this limit to satisfy the new requests. When the load
-# decreases, blocks will be freed until the high-water mark is
-# reached. Thereafter, blocks will be used to store hot
-# objects.
-#
-# If shared memory caching is enabled, Squid does not use the shared
-# cache space for in-transit objects, but they still consume as much
-# local memory as they need. For more details about the shared memory
-# cache, see memory_cache_shared.
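-#
-# Example (size is illustrative; tune it to the memory available
-# on this host):
-#
-# cache_mem 512 MB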
-#Default:
-# cache_mem 256 MB
-
-# TAG: maximum_object_size_in_memory (bytes)
-#	Objects greater than this size will not be kept in
-#	the memory cache. This should be set high enough to keep objects
-# accessed frequently in memory to improve performance whilst low
-# enough to keep larger objects from hoarding cache_mem.
-#Default:
-# maximum_object_size_in_memory 512 KB
-
-# TAG: memory_cache_shared on|off
-# Controls whether the memory cache is shared among SMP workers.
-#
-# The shared memory cache is meant to occupy cache_mem bytes and replace
-# the non-shared memory cache, although some entities may still be
-# cached locally by workers for now (e.g., internal and in-transit
-# objects may be served from a local memory cache even if shared memory
-# caching is enabled).
-#
-# By default, the memory cache is shared if and only if all of the
-# following conditions are satisfied: Squid runs in SMP mode with
-# multiple workers, cache_mem is positive, and Squid environment
-# supports required IPC primitives (e.g., POSIX shared memory segments
-# and GCC-style atomic operations).
-#
-# To avoid blocking locks, shared memory uses opportunistic algorithms
-# that do not guarantee that every cachable entity that could have been
-# shared among SMP workers will actually be shared.
-#
-# Currently, entities exceeding 32KB in size cannot be shared.
-#Default:
-# "on" where supported if doing memory caching with multiple SMP workers.
-
-# TAG: memory_cache_mode
-# Controls which objects to keep in the memory cache (cache_mem)
-#
-# always Keep most recently fetched objects in memory (default)
-#
-# disk Only disk cache hits are kept in memory, which means
-# an object must first be cached on disk and then hit
-#		a second time before it is cached in memory.
-#
-#	network	Only objects fetched from the network are kept in memory
-#Default:
-# Keep the most recently fetched objects in memory
-
-# TAG: memory_replacement_policy
-# The memory replacement policy parameter determines which
-# objects are purged from memory when memory space is needed.
-#
-# See cache_replacement_policy for details on algorithms.
-#Default:
-# memory_replacement_policy lru
-
-# DISK CACHE OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: cache_replacement_policy
-# The cache replacement policy parameter determines which
-# objects are evicted (replaced) when disk space is needed.
-#
-# lru : Squid's original list based LRU policy
-# heap GDSF : Greedy-Dual Size Frequency
-# heap LFUDA: Least Frequently Used with Dynamic Aging
-# heap LRU : LRU policy implemented using a heap
-#
-# Applies to any cache_dir lines listed below this directive.
-#
-#	The LRU policies keep recently referenced objects.
-#
-# The heap GDSF policy optimizes object hit rate by keeping smaller
-# popular objects in cache so it has a better chance of getting a
-# hit. It achieves a lower byte hit rate than LFUDA though since
-# it evicts larger (possibly popular) objects.
-#
-# The heap LFUDA policy keeps popular objects in cache regardless of
-# their size and thus optimizes byte hit rate at the expense of
-# hit rate since one large, popular object will prevent many
-# smaller, slightly less popular objects from being cached.
-#
-# Both policies utilize a dynamic aging mechanism that prevents
-# cache pollution that can otherwise occur with frequency-based
-# replacement policies.
-#
-# NOTE: if using the LFUDA replacement policy you should increase
-#	the value of maximum_object_size above its default of 4 MB
-#	to maximize the potential byte hit rate improvement of LFUDA.
-#
-# For more information about the GDSF and LFUDA cache replacement
-# policies see http://www.hpl.hp.com/techreports/1999/HPL-1999-69.html
-# and http://fog.hpl.external.hp.com/techreports/98/HPL-98-173.html.
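-#
-# Example (illustrative; when using LFUDA also raise
-# maximum_object_size, as noted above):
-#
-# cache_replacement_policy heap LFUDA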
-#Default:
-# cache_replacement_policy lru
-
-# TAG: cache_dir
-# Format:
-# cache_dir Type Directory-Name Fs-specific-data [options]
-#
-# You can specify multiple cache_dir lines to spread the
-# cache among different disk partitions.
-#
-# Type specifies the kind of storage system to use. Only "ufs"
-# is built by default. To enable any of the other storage systems
-# see the --enable-storeio configure option.
-#
-# 'Directory' is a top-level directory where cache swap
-# files will be stored. If you want to use an entire disk
-# for caching, this can be the mount-point directory.
-# The directory must exist and be writable by the Squid
-# process. Squid will NOT create this directory for you.
-#
-# In SMP configurations, cache_dir must not precede the workers option
-# and should use configuration macros or conditionals to give each
-# worker interested in disk caching a dedicated cache directory.
-#
-#
-# ==== The ufs store type ====
-#
-# "ufs" is the old well-known Squid storage format that has always
-# been there.
-#
-# Usage:
-# cache_dir ufs Directory-Name Mbytes L1 L2 [options]
-#
-# 'Mbytes' is the amount of disk space (MB) to use under this
-# directory. The default is 100 MB. Change this to suit your
-# configuration. Do NOT put the size of your disk drive here.
-# Instead, if you want Squid to use the entire disk drive,
-# subtract 20% and use that value.
-#
-# 'L1' is the number of first-level subdirectories which
-# will be created under the 'Directory'. The default is 16.
-#
-# 'L2' is the number of second-level subdirectories which
-# will be created under each first-level directory. The default
-# is 256.
-#
-#
-# ==== The aufs store type ====
-#
-# "aufs" uses the same storage format as "ufs", utilizing
-# POSIX-threads to avoid blocking the main Squid process on
-# disk-I/O. This was formerly known in Squid as async-io.
-#
-# Usage:
-# cache_dir aufs Directory-Name Mbytes L1 L2 [options]
-#
-# see argument descriptions under ufs above
-#
-#
-# ==== The diskd store type ====
-#
-# "diskd" uses the same storage format as "ufs", utilizing a
-# separate process to avoid blocking the main Squid process on
-# disk-I/O.
-#
-# Usage:
-# cache_dir diskd Directory-Name Mbytes L1 L2 [options] [Q1=n] [Q2=n]
-#
-# see argument descriptions under ufs above
-#
-# Q1 specifies the number of unacknowledged I/O requests when Squid
-# stops opening new files. If this many messages are in the queues,
-# Squid won't open new files. Default is 64
-#
-# Q2 specifies the number of unacknowledged messages when Squid
-# starts blocking. If this many messages are in the queues,
-# Squid blocks until it receives some replies. Default is 72
-#
-# When Q1 < Q2 (the default), the cache directory is optimized
-# for lower response time at the expense of a decrease in hit
-# ratio. If Q1 > Q2, the cache directory is optimized for
-# higher hit ratio at the expense of an increase in response
-# time.
-#
-#
-# ==== The rock store type ====
-#
-# Usage:
-# cache_dir rock Directory-Name Mbytes <max-size=bytes> [options]
-#
-# The Rock Store type is a database-style storage. All cached
-# entries are stored in a "database" file, using fixed-size slots,
-# one entry per slot. The database size is specified in MB. The
-# slot size is specified in bytes using the max-size option. See
-# below for more info on the max-size option.
-#
-# If possible, Squid using Rock Store creates a dedicated kid
-# process called "disker" to avoid blocking Squid worker(s) on disk
-# I/O. One disker kid is created for each rock cache_dir. Diskers
-# are created only when Squid, running in daemon mode, has support
-# for the IpcIo disk I/O module.
-#
-# swap-timeout=msec: Squid will not start writing a miss to or
-# reading a hit from disk if it estimates that the swap operation
-# will take more than the specified number of milliseconds. By
-#	default, and when set to zero, the disk I/O time limit is not
-#	enforced. Ignored when using the blocking I/O module because
-# blocking synchronous I/O does not allow Squid to estimate the
-# expected swap wait time.
-#
-# max-swap-rate=swaps/sec: Artificially limits disk access using
-# the specified I/O rate limit. Swap out requests that
-# would cause the average I/O rate to exceed the limit are
-# delayed. Individual swap in requests (i.e., hits or reads) are
-# not delayed, but they do contribute to measured swap rate and
-# since they are placed in the same FIFO queue as swap out
-# requests, they may wait longer if max-swap-rate is smaller.
-# This is necessary on file systems that buffer "too
-# many" writes and then start blocking Squid and other processes
-# while committing those writes to disk. Usually used together
-# with swap-timeout to avoid excessive delays and queue overflows
-# when disk demand exceeds available disk "bandwidth". By default
-#	and when set to zero, the disk I/O rate limit is not
-#	enforced. Currently supported by the IpcIo module only.
-#
-#
-# ==== The coss store type ====
-#
-# NP: COSS filesystem in Squid-3 has been deemed too unstable for
-# production use and has thus been removed from this release.
-# We hope that it can be made usable again soon.
-#
-# block-size=n defines the "block size" for COSS cache_dir's.
-# Squid uses file numbers as block numbers. Since file numbers
-# are limited to 24 bits, the block size determines the maximum
-# size of the COSS partition. The default is 512 bytes, which
-# leads to a maximum cache_dir size of 512<<24, or 8 GB. Note
-# you should not change the coss block size after Squid
-# has written some objects to the cache_dir.
-#
-# The coss file store has changed from 2.5. Now it uses a file
-# called 'stripe' in the directory names in the config - and
-# this will be created by squid -z.
-#
-#
-# ==== COMMON OPTIONS ====
-#
-# no-store no new objects should be stored to this cache_dir.
-#
-# min-size=n the minimum object size in bytes this cache_dir
-# will accept. It's used to restrict a cache_dir
-# to only store large objects (e.g. AUFS) while
-# other stores are optimized for smaller objects
-# (e.g. COSS).
-# Defaults to 0.
-#
-# max-size=n the maximum object size in bytes this cache_dir
-# supports.
-#			The value of the maximum_object_size directive sets
-#			the default unless more specific details are available
-#			about the cache_dir (i.e. a small store capacity).
-#
-# Note: To make optimal use of the max-size limits you should order
-# the cache_dir lines with the smallest max-size value first.
-#
-# Note for coss, max-size must be less than COSS_MEMBUF_SZ,
-# which can be changed with the --with-coss-membuf-size=N configure
-# option.
-#
-#Default:
-# No disk cache. Store cache objects only in memory.
-#
-
-# Uncomment and adjust the following to add a disk cache directory.
-#cache_dir ufs /var/spool/squid3 100 16 256
-cache_dir ufs /mnt/spool/squid/ 30000 16 256
-
-# TAG: store_dir_select_algorithm
-# How Squid selects which cache_dir to use when the response
-# object will fit into more than one.
-#
-# Regardless of which algorithm is used the cache_dir min-size
-# and max-size parameters are obeyed. As such they can affect
-# the selection algorithm by limiting the set of considered
-# cache_dir.
-#
-# Algorithms:
-#
-# least-load
-#
-# This algorithm is suited to caches with similar cache_dir
-# sizes and disk speeds.
-#
-# The disk with the least I/O pending is selected.
-# When there are multiple disks with the same I/O load ranking
-# the cache_dir with most available capacity is selected.
-#
-# When a mix of cache_dir sizes are configured the faster disks
-# have a naturally lower I/O loading and larger disks have more
-# capacity. So space used to store objects and data throughput
-# may be very unbalanced towards larger disks.
-#
-#
-# round-robin
-#
-# This algorithm is suited to caches with unequal cache_dir
-# disk sizes.
-#
-# Each cache_dir is selected in a rotation. The next suitable
-# cache_dir is used.
-#
-# Available cache_dir capacity is only considered in relation
-# to whether the object will fit and meets the min-size and
-# max-size parameters.
-#
-# Disk I/O loading is only considered to prevent overload on slow
-# disks. This algorithm does not spread objects by size, so any
-# I/O loading per-disk may appear very unbalanced and volatile.
-#
-#Default:
-# store_dir_select_algorithm least-load
-
-# TAG: max_open_disk_fds
-# To avoid having disk as the I/O bottleneck Squid can optionally
-#	bypass the on-disk cache if more than this number of disk file
-# descriptors are open.
-#
-# A value of 0 indicates no limit.
-#Default:
-# no limit
-
-# TAG: minimum_object_size (bytes)
-# Objects smaller than this size will NOT be saved on disk. The
-# value is specified in bytes, and the default is 0 KB, which
-# means all responses can be stored.
-#Default:
-# no limit
-
-# TAG: maximum_object_size (bytes)
-# The default limit on size of objects stored to disk.
-# This size is used for cache_dir where max-size is not set.
-# The value is specified in bytes, and the default is 4 MB.
-#
-# If you wish to get a high BYTES hit ratio, you should probably
-# increase this (one 32 MB object hit counts for 3200 10KB
-# hits).
-#
-# If you wish to increase hit ratio more than you want to
-# save bandwidth you should leave this low.
-#
-# NOTE: if using the LFUDA replacement policy you should increase
-# this value to maximize the byte hit rate improvement of LFUDA!
-# See replacement_policy below for a discussion of this policy.
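-#
-# Example (value is illustrative only):
-#
-# maximum_object_size 64 MB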
-#Default:
-# maximum_object_size 4 MB
-
-# TAG: cache_swap_low (percent, 0-100)
-# The low-water mark for cache object replacement.
-# Replacement begins when the swap (disk) usage is above the
-# low-water mark and attempts to maintain utilization near the
-# low-water mark. As swap utilization gets close to high-water
-# mark object eviction becomes more aggressive. If utilization is
-# close to the low-water mark less replacement is done each time.
-#
-# Defaults are 90% and 95%. If you have a large cache, 5% could be
-# hundreds of MB. If this is the case you may wish to set these
-# numbers closer together.
-#
-# See also cache_swap_high
-#Default:
-# cache_swap_low 90
-
-# TAG: cache_swap_high (percent, 0-100)
-# The high-water mark for cache object replacement.
-# Replacement begins when the swap (disk) usage is above the
-# low-water mark and attempts to maintain utilization near the
-# low-water mark. As swap utilization gets close to high-water
-# mark object eviction becomes more aggressive. If utilization is
-# close to the low-water mark less replacement is done each time.
-#
-# Defaults are 90% and 95%. If you have a large cache, 5% could be
-# hundreds of MB. If this is the case you may wish to set these
-# numbers closer together.
-#
-# See also cache_swap_low
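-#
-# Example for a large cache where a 5% band would be hundreds of MB
-# (values are illustrative only):
-#
-# cache_swap_low 93
-# cache_swap_high 95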
-#Default:
-# cache_swap_high 95
-
-# LOGFILE OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: logformat
-# Usage:
-#
-# logformat <name> <format specification>
-#
-# Defines an access log format.
-#
-# The <format specification> is a string with embedded % format codes
-#
-# % format codes all follow the same basic structure where all but
-# the formatcode is optional. Output strings are automatically escaped
-# as required according to their context and the output format
-# modifiers are usually not needed, but can be specified if an explicit
-# output format is desired.
-#
-# % ["|[|'|#] [-] [[0]width] [{argument}] formatcode
-#
-# " output in quoted string format
-# [ output in squid text log format as used by log_mime_hdrs
-# # output in URL quoted format
-# ' output as-is
-#
-# - left aligned
-#
-# width minimum and/or maximum field width:
-# [width_min][.width_max]
-# When minimum starts with 0, the field is zero-padded.
-# String values exceeding maximum width are truncated.
-#
-# {arg} argument such as header name etc
-#
-# Format codes:
-#
-# % a literal % character
-# sn Unique sequence number per log line entry
-# err_code The ID of an error response served by Squid or
-# a similar internal error identifier.
-# err_detail Additional err_code-dependent error information.
-#
-# Connection related format codes:
-#
-# >a Client source IP address
-# >A Client FQDN
-# >p Client source port
-# >eui Client source EUI (MAC address, EUI-48 or EUI-64 identifier)
-# >la Local IP address the client connected to
-# >lp Local port number the client connected to
-#
-# la Local listening IP address the client connection was connected to.
-# lp Local listening port number the client connection was connected to.
-#
-# <a Server IP address of the last server or peer connection
-# <A Server FQDN or peer name
-# <p Server port number of the last server or peer connection
-# <la Local IP address of the last server or peer connection
-# <lp Local port number of the last server or peer connection
-#
-# Time related format codes:
-#
-# ts Seconds since epoch
-# tu subsecond time (milliseconds)
-# tl Local time. Optional strftime format argument
-# default %d/%b/%Y:%H:%M:%S %z
-# tg GMT time. Optional strftime format argument
-# default %d/%b/%Y:%H:%M:%S %z
-# tr Response time (milliseconds)
-# dt Total time spent making DNS lookups (milliseconds)
-#
-# Access Control related format codes:
-#
-# et Tag returned by external acl
-# ea Log string returned by external acl
-# un User name (any available)
-# ul User name from authentication
-# ue User name from external acl helper
-# ui User name from ident
-# us User name from SSL
-#
-# HTTP related format codes:
-#
-# [http::]>h Original received request header.
-# Usually differs from the request header sent by
-# Squid, although most fields are often preserved.
-# Accepts optional header field name/value filter
-# argument using name[:[separator]element] format.
-# [http::]>ha Received request header after adaptation and
-# redirection (pre-cache REQMOD vectoring point).
-# Usually differs from the request header sent by
-# Squid, although most fields are often preserved.
-# Optional header name argument as for >h
-# [http::]<h Reply header. Optional header name argument
-# as for >h
-# [http::]>Hs HTTP status code sent to the client
-# [http::]<Hs HTTP status code received from the next hop
-# [http::]<bs Number of HTTP-equivalent message body bytes
-# received from the next hop, excluding chunked
-# transfer encoding and control messages.
-# Generated FTP/Gopher listings are treated as
-# received bodies.
-# [http::]mt MIME content type
-# [http::]rm Request method (GET/POST etc)
-# [http::]>rm Request method from client
-# [http::]<rm Request method sent to server or peer
-# [http::]ru Request URL from client (historic, filtered for logging)
-# [http::]>ru Request URL from client
-# [http::]<ru Request URL sent to server or peer
-# [http::]rp Request URL-Path excluding hostname
-# [http::]>rp Request URL-Path excluding hostname from client
-#		[http::]<rp	Request URL-Path excluding hostname sent to server or peer
-# [http::]rv Request protocol version
-# [http::]>rv Request protocol version from client
-# [http::]<rv Request protocol version sent to server or peer
-# [http::]<st Sent reply size including HTTP headers
-# [http::]>st Received request size including HTTP headers. In the
-# case of chunked requests the chunked encoding metadata
-# are not included
-# [http::]>sh Received HTTP request headers size
-# [http::]<sh Sent HTTP reply headers size
-# [http::]st Request+Reply size including HTTP headers
-# [http::]<sH Reply high offset sent
-# [http::]<sS Upstream object size
-# [http::]<pt Peer response time in milliseconds. The timer starts
-# when the last request byte is sent to the next hop
-# and stops when the last response byte is received.
-# [http::]<tt Total server-side time in milliseconds. The timer
-# starts with the first connect request (or write I/O)
-# sent to the first selected peer. The timer stops
-# with the last I/O with the last peer.
-#
-# Squid handling related format codes:
-#
-# Ss Squid request status (TCP_MISS etc)
-# Sh Squid hierarchy status (DEFAULT_PARENT etc)
-#
-# SSL-related format codes:
-#
-# ssl::bump_mode SslBump decision for the transaction:
-#
-# For CONNECT requests that initiated bumping of
-# a connection and for any request received on
-# an already bumped connection, Squid logs the
-# corresponding SslBump mode ("server-first" or
-# "client-first"). See the ssl_bump option for
-# more information about these modes.
-#
-# A "none" token is logged for requests that
-# triggered "ssl_bump" ACL evaluation matching
-# either a "none" rule or no rules at all.
-#
-# In all other cases, a single dash ("-") is
-# logged.
-#
-# If ICAP is enabled, the following code becomes available (as
-# well as ICAP log codes documented with the icap_log option):
-#
-# icap::tt Total ICAP processing time for the HTTP
-# transaction. The timer ticks when ICAP
-# ACLs are checked and when ICAP
-# transaction is in progress.
-#
-# If adaptation is enabled the following three codes become available:
-#
-# adapt::<last_h The header of the last ICAP response or
-# meta-information from the last eCAP
-# transaction related to the HTTP transaction.
-# Like <h, accepts an optional header name
-# argument.
-#
-# adapt::sum_trs Summed adaptation transaction response
-# times recorded as a comma-separated list in
-# the order of transaction start time. Each time
-# value is recorded as an integer number,
-# representing response time of one or more
-# adaptation (ICAP or eCAP) transaction in
-# milliseconds. When a failed transaction is
-# being retried or repeated, its time is not
-# logged individually but added to the
-# replacement (next) transaction. See also:
-# adapt::all_trs.
-#
-# adapt::all_trs All adaptation transaction response times.
-# Same as adaptation_strs but response times of
-# individual transactions are never added
-# together. Instead, all transaction response
-# times are recorded individually.
-#
-# You can prefix adapt::*_trs format codes with adaptation
-# service name in curly braces to record response time(s) specific
-# to that service. For example: %{my_service}adapt::sum_trs
-#
-#	If SSL is enabled, the following formatting codes become available:
-#
-# %ssl::>cert_subject The Subject field of the received client
-# SSL certificate or a dash ('-') if Squid has
-# received an invalid/malformed certificate or
-# no certificate at all. Consider encoding the
-# logged value because Subject often has spaces.
-#
-# %ssl::>cert_issuer The Issuer field of the received client
-# SSL certificate or a dash ('-') if Squid has
-# received an invalid/malformed certificate or
-# no certificate at all. Consider encoding the
-# logged value because Issuer often has spaces.
-#
-# The default formats available (which do not need re-defining) are:
-#
-#logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt
-#logformat common %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st %Ss:%Sh
-#logformat combined %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
-#logformat referrer %ts.%03tu %>a %{Referer}>h %ru
-#logformat useragent %>a [%tl] "%{User-Agent}>h"
-#
-#	NOTE: When the log_mime_hdrs directive is set to ON,
-#	the squid, common and combined formats have a safely encoded copy
-# of the mime headers appended to each line within a pair of brackets.
-#
-# NOTE: The common and combined formats are not quite true to the Apache definition.
-# The logs from Squid contain an extra status and hierarchy code appended.
-#
-#Default:
-# The format definitions squid, common, combined, referrer, useragent are built in.
-
-# TAG: access_log
-#	These files log client request activities. There is one line for
-#	every HTTP or ICP request. The format is:
-# access_log <module>:<place> [<logformat name> [acl acl ...]]
-#	access_log none [acl acl ...]
-#
-# Will log to the specified module:place using the specified format (which
-# must be defined in a logformat directive) those entries which match
-# ALL the acl's specified (which must be defined in acl clauses).
-# If no acl is specified, all requests will be logged to this destination.
-#
-# ===== Modules Currently available =====
-#
-#	none	Do not log any requests matching these ACLs.
-# Do not specify Place or logformat name.
-#
-# stdio Write each log line to disk immediately at the completion of
-# each request.
-# Place: the filename and path to be written.
-#
-#	daemon	Very similar to stdio, but instead of writing to disk the log
-#		line is passed to a daemon helper for asynchronous handling.
-# Place: varies depending on the daemon.
-#
-# log_file_daemon Place: the file name and path to be written.
-#
-# syslog To log each request via syslog facility.
-# Place: The syslog facility and priority level for these entries.
-# Place Format: facility.priority
-#
-# where facility could be any of:
-# authpriv, daemon, local0 ... local7 or user.
-#
-# And priority could be any of:
-# err, warning, notice, info, debug.
-#
-# udp To send each log line as text data to a UDP receiver.
-# Place: The destination host name or IP and port.
-# Place Format: //host:port
-#
-# tcp To send each log line as text data to a TCP receiver.
-# Place: The destination host name or IP and port.
-# Place Format: //host:port
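-#
-# Example of sending matching requests to a separate log file
-# (a sketch; the acl name and file path are placeholders):
-#
-# acl localnet src 10.0.0.0/8
-# access_log daemon:/var/log/squid3/external.log squid !localnet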
-#
-# Default:
-# access_log daemon:/var/log/squid3/access.log squid
-#Default:
-# access_log daemon:/var/log/squid3/access.log squid
-
-# TAG: icap_log
-# ICAP log files record ICAP transaction summaries, one line per
-# transaction.
-#
-# The icap_log option format is:
-# icap_log <filepath> [<logformat name> [acl acl ...]]
-#	icap_log none [acl acl ...]
-#
-# Please see access_log option documentation for details. The two
-# kinds of logs share the overall configuration approach and many
-# features.
-#
-# ICAP processing of a single HTTP message or transaction may
-# require multiple ICAP transactions. In such cases, multiple
-# ICAP transaction log lines will correspond to a single access
-# log line.
-#
-# ICAP log uses logformat codes that make sense for an ICAP
-# transaction. Header-related codes are applied to the HTTP header
-# embedded in an ICAP server response, with the following caveats:
-# For REQMOD, there is no HTTP response header unless the ICAP
-# server performed request satisfaction. For RESPMOD, the HTTP
-# request header is the header sent to the ICAP server. For
-# OPTIONS, there are no HTTP headers.
-#
-# The following format codes are also available for ICAP logs:
-#
-# icap::<A ICAP server IP address. Similar to <A.
-#
-# icap::<service_name ICAP service name from the icap_service
-# option in Squid configuration file.
-#
-# icap::ru ICAP Request-URI. Similar to ru.
-#
-# icap::rm ICAP request method (REQMOD, RESPMOD, or
-# OPTIONS). Similar to existing rm.
-#
-# icap::>st Bytes sent to the ICAP server (TCP payload
-# only; i.e., what Squid writes to the socket).
-#
-# icap::<st Bytes received from the ICAP server (TCP
-# payload only; i.e., what Squid reads from
-# the socket).
-#
-# icap::<bs Number of message body bytes received from the
-# ICAP server. ICAP message body, if any, usually
-# includes encapsulated HTTP message headers and
-# possibly encapsulated HTTP message body. The
-# HTTP body part is dechunked before its size is
-# computed.
-#
-# icap::tr Transaction response time (in
-# milliseconds). The timer starts when
-# the ICAP transaction is created and
-# stops when the transaction is completed.
-# Similar to tr.
-#
-# icap::tio Transaction I/O time (in milliseconds). The
-# timer starts when the first ICAP request
-#			byte is scheduled for sending. The timer
-# stops when the last byte of the ICAP response
-# is received.
-#
-# icap::to Transaction outcome: ICAP_ERR* for all
-# transaction errors, ICAP_OPT for OPTION
-# transactions, ICAP_ECHO for 204
-# responses, ICAP_MOD for message
-# modification, and ICAP_SAT for request
-# satisfaction. Similar to Ss.
-#
-# icap::Hs ICAP response status code. Similar to Hs.
-#
-# icap::>h ICAP request header(s). Similar to >h.
-#
-# icap::<h ICAP response header(s). Similar to <h.
-#
-# The default ICAP log format, which can be used without an explicit
-# definition, is called icap_squid:
-#
-#logformat icap_squid %ts.%03tu %6icap::tr %>a %icap::to/%03icap::Hs %icap::<size %icap::rm %icap::ru% %un -/%icap::<A -
-#
-# See also: logformat, log_icap, and %adapt::<last_h
-#Default:
-# none
-
-# TAG: logfile_daemon
-# Specify the path to the logfile-writing daemon. This daemon is
-# used to write the access and store logs, if configured.
-#
-# Squid sends a number of commands to the log daemon:
-# L<data>\n - logfile data
-# R\n - rotate file
-# T\n - truncate file
-# O\n - reopen file
-# F\n - flush file
-# r<n>\n - set rotate count to <n>
-# b<n>\n - 1 = buffer output, 0 = don't buffer output
-#
-#	No response is expected.
-#Default:
-# logfile_daemon /usr/lib/squid3/log_file_daemon
-
-# TAG: log_access allow|deny acl acl...
-#	This option allows you to control which requests get logged
-# to access.log (see access_log directive). Requests denied for
-# logging will also not be accounted for in performance counters.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
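-#
-# Example of keeping health-check traffic out of access.log
-# (a sketch; the acl name and address are placeholders):
-#
-# acl health_checks src 127.0.0.1/32
-# log_access deny health_checks
-# log_access allow all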
-#Default:
-# Allow logging for all transactions.
-
-# TAG: log_icap
-#	This option allows you to control which requests get logged
-# to icap.log. See the icap_log directive for ICAP log details.
-#Default:
-# Allow logging for all ICAP transactions.
-
-# TAG: cache_store_log
-# Logs the activities of the storage manager. Shows which
-# objects are ejected from the cache, and which objects are
-# saved and for how long.
-#	There are not really any utilities to analyze this data, so you can safely
-# disable it (the default).
-#
-# Store log uses modular logging outputs. See access_log for the list
-# of modules supported.
-#
-# Example:
-# cache_store_log stdio:/var/log/squid3/store.log
-# cache_store_log daemon:/var/log/squid3/store.log
-#Default:
-# none
-
-# TAG: cache_swap_state
-# Location for the cache "swap.state" file. This index file holds
-# the metadata of objects saved on disk. It is used to rebuild
-# the cache during startup. Normally this file resides in each
-# 'cache_dir' directory, but you may specify an alternate
-# pathname here. Note you must give a full filename, not just
-# a directory. Since this is the index for the whole object
-# list you CANNOT periodically rotate it!
-#
-#	If %s is used in the file name it will be replaced with a
-#	representation of the cache_dir name where each / is replaced
-# with '.'. This is needed to allow adding/removing cache_dir
-# lines when cache_swap_log is being used.
-#
-#	If you have more than one 'cache_dir' and %s is not used in the name,
-#	these swap logs will have names such as:
-#
-# cache_swap_log.00
-# cache_swap_log.01
-# cache_swap_log.02
-#
-# The numbered extension (which is added automatically)
-# corresponds to the order of the 'cache_dir' lines in this
-# configuration file. If you change the order of the 'cache_dir'
-# lines in this file, these index files will NOT correspond to
-# the correct 'cache_dir' entry (unless you manually rename
-# them). We recommend you do NOT use this option. It is
-# better to keep these index files in each 'cache_dir' directory.
-#Default:
-# Store the journal inside its cache_dir
-
-# TAG: logfile_rotate
-# Specifies the number of logfile rotations to make when you
-# type 'squid -k rotate'. The default is 10, which will rotate
-# with extensions 0 through 9. Setting logfile_rotate to 0 will
-# disable the file name rotation, but the logfiles are still closed
-# and re-opened. This will enable you to rename the logfiles
-# yourself just before sending the rotate signal.
-#
-# Note, the 'squid -k rotate' command normally sends a USR1
-# signal to the running squid process. In certain situations
-# (e.g. on Linux with Async I/O), USR1 is used for other
-# purposes, so -k rotate uses another signal. It is best to get
-# in the habit of using 'squid -k rotate' instead of 'kill -USR1
-# <pid>'.
-#
-# Note, from Squid-3.1 this option is only a default for cache.log,
-# that log can be rotated separately by using debug_options.
-#
-# Note2, for Debian/Linux the default of logfile_rotate is
-# zero, since it includes external logfile-rotation methods.
-#Default:
-# logfile_rotate 0
-
-# TAG: emulate_httpd_log
-# Replace this with an access_log directive using the format 'common' or 'combined'.
-#Default:
-# none
-
-# TAG: log_ip_on_direct
-# Remove this option from your config. To log server or peer names use %<A in the log format.
-#Default:
-# none
-
-# TAG: mime_table
-# Path to Squid's icon configuration file.
-#
-# You shouldn't need to change this, but the default file contains
-# examples and formatting information if you do.
-#Default:
-# mime_table /usr/share/squid3/mime.conf
-
-# TAG: log_mime_hdrs on|off
-# The Cache can record both the request and the response MIME
-# headers for each HTTP transaction. The headers are encoded
-# safely and will appear as two bracketed fields at the end of
-# the access log (for either the native or httpd-emulated log
-# formats). To enable this logging set log_mime_hdrs to 'on'.
-#Default:
-# log_mime_hdrs off
-
-# TAG: useragent_log
-# Replace this with an access_log directive using the format 'useragent'.
-#Default:
-# none
-
-# TAG: referer_log
-# Replace this with an access_log directive using the format 'referrer'.
-#Default:
-# none
-
-# TAG: pid_filename
-# A filename to write the process-id to. To disable, enter "none".
-#Default:
-# pid_filename /var/run/squid3.pid
-
-# TAG: log_fqdn
-# Remove this option from your config. To log FQDN use %>A in the log format.
-#Default:
-# none
-
-# TAG: client_netmask
-# A netmask for client addresses in logfiles and cachemgr output.
-# Change this to protect the privacy of your cache clients.
-# A netmask of 255.255.255.0 will log all IPs in that range with
-# the last octet set to '0'.
-#Default:
-# Log full client IP address
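-#
-# A hypothetical example (value chosen only for illustration): masking
-# the last octet of every logged client address:
-#
-# client_netmask 255.255.255.0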
-
-# TAG: forward_log
-# Use a regular access.log with ACL limiting it to MISS events.
-#Default:
-# none
-
-# TAG: strip_query_terms
-# By default, Squid strips query terms from requested URLs before
-# logging. This protects your users' privacy and reduces log size.
-#
-# When investigating HIT/MISS or other caching behaviour you
-# will need to disable this to see the full URL used by Squid.
-#Default:
-# strip_query_terms on
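-#
-# A possible debugging-time override (illustrative only; remember to
-# re-enable it afterwards for privacy):
-#
-# strip_query_terms off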
-
-# TAG: buffered_logs on|off
-# Whether to write/send access_log records ASAP or accumulate them and
-# then write/send them in larger chunks. Buffering may improve
-# performance because it decreases the number of I/Os. However,
-# buffering increases the delay before log records become available to
-# the final recipient (e.g., a disk file or logging daemon) and,
-# hence, increases the risk of log record loss.
-#
-# Note that even when buffered_logs are off, Squid may have to buffer
-# records if it cannot write/send them immediately due to pending I/Os
-# (e.g., the I/O writing the previous log record) or connectivity loss.
-#
-# Currently honored by 'daemon' access_log module only.
-#Default:
-# buffered_logs off
-
-# TAG: netdb_filename
-# Where Squid stores its netdb journal.
-# When enabled this journal preserves netdb state between restarts.
-#
-# To disable, enter "none".
-#Default:
-# netdb_filename stdio:/var/log/squid3/netdb.state
-
-# OPTIONS FOR TROUBLESHOOTING
-# -----------------------------------------------------------------------------
-
-# TAG: cache_log
-# Squid administrative logging file.
-#
-# This is where general information about Squid behavior goes. You can
-# increase the amount of data logged to this file and how often it is
-# rotated with "debug_options"
-#Default:
-# cache_log /var/log/squid3/cache.log
-
-# TAG: debug_options
-# Logging options are set as section,level where each source file
-# is assigned a unique section. Lower levels result in less
-# output. Full debugging (level 9) can result in a very large
-# log file, so be careful.
-#
-# The magic word "ALL" sets debugging levels for all sections.
-# The default is to run with "ALL,1" to record important warnings.
-#
-# The rotate=N option can be used to keep more or less of these logs
-# than would otherwise be kept by logfile_rotate.
-# For most uses a single log should be enough to monitor current
-# events affecting Squid.
-#Default:
-# Log all critical and important messages.
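-#
-# A sketch of a slightly more verbose setting using the section,level
-# form described above (the level shown is an assumption, not a
-# recommendation):
-#
-# debug_options ALL,2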
-
-# TAG: coredump_dir
-# By default Squid leaves core files in the directory from where
-# it was started. If you set 'coredump_dir' to a directory
-# that exists, Squid will chdir() to that directory at startup
-# and coredump files will be left there.
-#
-#Default:
-# Use the directory from where Squid was started.
-#
-
-# Leave coredumps in the first cache dir
-coredump_dir /var/spool/squid3
-
-# OPTIONS FOR FTP GATEWAYING
-# -----------------------------------------------------------------------------
-
-# TAG: ftp_user
-# If you want the anonymous login password to be more informative
-# (and enable the use of picky FTP servers), set this to something
-# reasonable for your domain, like wwwuser@somewhere.net
-#
-# The reason this is domainless by default is that the
-# request can be made on behalf of a user in any domain,
-# depending on how the cache is used.
-# Some FTP servers also validate that the email address is valid
-# (for example perl.com).
-#Default:
-# ftp_user Squid@
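-#
-# An illustrative value only (example.com is a placeholder domain):
-#
-# ftp_user wwwuser@example.com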
-
-# TAG: ftp_passive
-# If your firewall does not allow Squid to use passive
-# connections, turn off this option.
-#
-# Use of ftp_epsv_all option requires this to be ON.
-#Default:
-# ftp_passive on
-
-# TAG: ftp_epsv_all
-# FTP Protocol extensions permit the use of a special "EPSV ALL" command.
-#
-# NATs may be able to put the connection on a "fast path" through the
-# translator, as the EPRT command will never be used and therefore,
-# translation of the data portion of the segments will never be needed.
-#
-# When a client only expects to do two-way FTP transfers this may be
-# useful.
-# If squid finds that it must do a three-way FTP transfer after issuing
-# an EPSV ALL command, the FTP session will fail.
-#
-# If you have any doubts about this option do not use it.
-# Squid will nicely attempt all other connection methods.
-#
-# Requires ftp_passive to be ON (default) for any effect.
-#Default:
-# ftp_epsv_all off
-
-# TAG: ftp_epsv
-# FTP Protocol extensions permit the use of a special "EPSV" command.
-#
-# NATs may be able to put the connection on a "fast path" through the
-# translator using EPSV, as the EPRT command will never be used
-# and therefore, translation of the data portion of the segments
-# will never be needed.
-#
-# Turning this OFF will prevent EPSV being attempted.
-# WARNING: Doing so will convert Squid back to the old behavior with all
-# the related problems with external NAT devices/layers.
-#
-# Requires ftp_passive to be ON (default) for any effect.
-#Default:
-# ftp_epsv on
-
-# TAG: ftp_eprt
-# FTP Protocol extensions permit the use of a special "EPRT" command.
-#
-# This extension provides a protocol neutral alternative to the
-# IPv4-only PORT command. When supported it enables active FTP data
-# channels over IPv6 and efficient NAT handling.
-#
-# Turning this OFF will prevent EPRT being attempted and will skip
-# straight to using PORT for IPv4 servers.
-#
-# Some devices are known to not handle this extension correctly and
-# may result in crashes. Devices which support EPRT enough to fail
-# cleanly will result in Squid attempting PORT anyway. This directive
-# should only be disabled when EPRT results in device failures.
-#
-# WARNING: Doing so will convert Squid back to the old behavior with all
-# the related problems with external NAT devices/layers and IPv4-only FTP.
-#Default:
-# ftp_eprt on
-
-# TAG: ftp_sanitycheck
-# For security and data integrity reasons Squid by default performs
-# sanity checks of the addresses of FTP data connections to ensure the
-# data connection is to the requested server. If you need to allow
-# FTP connections to servers using another IP address for the data
-# connection turn this off.
-#Default:
-# ftp_sanitycheck on
-
-# TAG: ftp_telnet_protocol
-# The FTP protocol is officially defined to use the telnet protocol
-# as transport channel for the control connection. However, many
-# implementations are broken and do not respect this aspect of
-# the FTP protocol.
-#
-# If you have trouble accessing files with ASCII code 255 in the
-# path or similar problems involving this ASCII code you can
-# try setting this directive to off. If that helps, report to the
-# operator of the FTP server in question that their FTP server
-# is broken and does not follow the FTP standard.
-#Default:
-# ftp_telnet_protocol on
-
-# OPTIONS FOR EXTERNAL SUPPORT PROGRAMS
-# -----------------------------------------------------------------------------
-
-# TAG: diskd_program
-# Specify the location of the diskd executable.
-# Note this is only useful if you have compiled in
-# diskd as one of the store io modules.
-#Default:
-# diskd_program /usr/lib/squid3/diskd
-
-# TAG: unlinkd_program
-# Specify the location of the executable for file deletion process.
-#Default:
-# unlinkd_program /usr/lib/squid3/unlinkd
-
-# TAG: pinger_program
-# Specify the location of the executable for the pinger process.
-#Default:
-# pinger_program /usr/lib/squid3/pinger
-
-# TAG: pinger_enable
-# Control whether the pinger is active at run-time.
-# Enables turning ICMP pinger on and off with a simple
-# squid -k reconfigure.
-#Default:
-# pinger_enable on
-
-# OPTIONS FOR URL REWRITING
-# -----------------------------------------------------------------------------
-
-# TAG: url_rewrite_program
-# Specify the location of the executable URL rewriter to use.
-# Since rewriters can perform almost any function, there isn't one included by default.
-#
-# For each requested URL, the rewriter will receive one line with the format
-#
-# URL <SP> client_ip "/" fqdn <SP> user <SP> method [<SP> kvpairs]<NL>
-#
-# In the future, the rewriter interface will be extended with
-# key=value pairs ("kvpairs" shown above). Rewriter programs
-# should be prepared to receive and possibly ignore additional
-# whitespace-separated tokens on each input line.
-#
-# The rewriter may return a rewritten URL. The other components of
-# the request line do not need to be returned (they are ignored if they are).
-#
-# The rewriter can also indicate that a client-side redirect should
-# be performed to the new URL. This is done by prefixing the returned
-# URL with "301:" (moved permanently) or 302: (moved temporarily), etc.
-#
-# By default, a URL rewriter is not used.
-#Default:
-# none
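-#
-# A minimal sketch, assuming a rewriter helper exists at the path shown
-# (both the path and the helper are hypothetical):
-#
-# url_rewrite_program /usr/local/bin/rewrite-helper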
-
-# TAG: url_rewrite_children
-# The maximum number of redirector processes to spawn. If you set
-# this too low, Squid will have to wait for them to process a backlog of
-# URLs, slowing it down. If you allow too many they will use RAM
-# and other system resources noticeably.
-#
-# The startup= and idle= options allow some measure of skew in your
-# tuning.
-#
-# startup=
-#
-# Sets a minimum of how many processes are to be spawned when Squid
-# starts or reconfigures. When set to zero the first request will
-# cause spawning of the first child process to handle it.
-#
-# Starting too few will cause an initial slowdown in traffic as Squid
-# attempts to simultaneously spawn enough processes to cope.
-#
-# idle=
-#
-# Sets a minimum of how many processes Squid is to try and keep available
-# at all times. When traffic begins to rise above what the existing
-# processes can handle this many more will be spawned up to the maximum
-# configured. A minimum setting of 1 is required.
-#
-# concurrency=
-#
-# The number of requests each redirector helper can handle in
-# parallel. Defaults to 0, which indicates the redirector
-# is an old-style single-threaded redirector.
-#
-# When this directive is set to a value >= 1 then the protocol
-# used to communicate with the helper is modified to include
-# a request ID in front of the request/response. The request
-# ID from the request must be echoed back with the response
-# to that request.
-#Default:
-# url_rewrite_children 20 startup=0 idle=1 concurrency=0
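-#
-# An illustrative tuning combining the startup=, idle= and concurrency=
-# options described above (the numbers are assumptions, not
-# recommendations):
-#
-# url_rewrite_children 20 startup=5 idle=2 concurrency=0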
-
-# TAG: url_rewrite_host_header
-# To preserve same-origin security policies in browsers and
-# prevent Host: header forgery by redirectors Squid rewrites
-# any Host: header in redirected requests.
-#
-# If you are running an accelerator this may not be a wanted
-# effect of a redirector. This directive enables you to disable
-# Host: alteration in reverse-proxy traffic.
-#
-# WARNING: Entries are cached on the result of the URL rewriting
-# process, so be careful if you have domain-virtual hosts.
-#
-# WARNING: Squid and other software verify that the URL and Host
-# match, so be careful not to relay through other proxies
-# or inspecting firewalls with this disabled.
-#Default:
-# url_rewrite_host_header on
-
-# TAG: url_rewrite_access
-# If defined, this access list specifies which requests are
-# sent to the redirector processes.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Allow, unless rules exist in squid.conf.
-
-# TAG: url_rewrite_bypass
-# When this is 'on', a request will not go through the
-# redirector if all the helpers are busy. If this is 'off'
-# and the redirector queue grows too large, Squid will exit
-# with a FATAL error and ask you to increase the number of
-# redirectors. You should only enable this if the redirectors
-# are not critical to your caching system. If you use
-# redirectors for access control, and you enable this option,
-# users may have access to pages they should not
-# be allowed to request.
-#Default:
-# url_rewrite_bypass off
-
-# OPTIONS FOR TUNING THE CACHE
-# -----------------------------------------------------------------------------
-
-# TAG: cache
-# A list of ACL elements which, if matched and denied, cause the request to
-# not be satisfied from the cache and the reply to not be cached.
-# In other words, use this to force certain objects to never be cached.
-#
-# You must use the words 'allow' or 'deny' to indicate whether items
-# matching the ACL should be allowed or denied into the cache.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Allow caching, unless rules exist in squid.conf.
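-#
-# A hedged sketch of the allow/deny form described above; the ACL name
-# and domain pattern are hypothetical:
-#
-# acl no_cache_local dstdomain .internal.example.com
-# cache deny no_cache_local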
-
-# TAG: max_stale time-units
-# This option puts an upper limit on how stale content Squid
-# will serve from the cache if cache validation fails.
-# Can be overridden by the refresh_pattern max-stale option.
-#Default:
-# max_stale 1 week
-
-# TAG: refresh_pattern
-# usage: refresh_pattern [-i] regex min percent max [options]
-#
-# By default, regular expressions are CASE-SENSITIVE. To make
-# them case-insensitive, use the -i option.
-#
-# 'Min' is the time (in minutes) an object without an explicit
-# expiry time should be considered fresh. The recommended
-# value is 0; any higher value may cause dynamic applications
-# to be erroneously cached unless the application designer
-# has taken the appropriate actions.
-#
-# 'Percent' is a percentage of the object's age (time since last
-# modification) for which an object without an explicit expiry time
-# will be considered fresh.
-#
-# 'Max' is an upper limit on how long objects without an explicit
-# expiry time will be considered fresh.
-#
-# options: override-expire
-# override-lastmod
-# reload-into-ims
-# ignore-reload
-# ignore-no-store
-# ignore-must-revalidate
-# ignore-private
-# ignore-auth
-# max-stale=NN
-# refresh-ims
-# store-stale
-#
-# override-expire enforces min age even if the server
-# sent an explicit expiry time (e.g., with the
-# Expires: header or Cache-Control: max-age). Doing this
-# VIOLATES the HTTP standard. Enabling this feature
-# could make you liable for problems which it causes.
-#
-# Note: override-expire does not enforce staleness - it only extends
-# freshness / min. If the server returns an Expires time which
-# is longer than your max time, Squid will still consider
-# the object fresh for that period of time.
-#
-# override-lastmod enforces min age even on objects
-# that were modified recently.
-#
-# reload-into-ims changes client no-cache or ``reload''
-# to If-Modified-Since requests. Doing this VIOLATES the
-# HTTP standard. Enabling this feature could make you
-# liable for problems which it causes.
-#
-# ignore-reload ignores a client no-cache or ``reload''
-# header. Doing this VIOLATES the HTTP standard. Enabling
-# this feature could make you liable for problems which
-# it causes.
-#
-# ignore-no-store ignores any ``Cache-control: no-store''
-# headers received from a server. Doing this VIOLATES
-# the HTTP standard. Enabling this feature could make you
-# liable for problems which it causes.
-#
-# ignore-must-revalidate ignores any ``Cache-Control: must-revalidate``
-# headers received from a server. Doing this VIOLATES
-# the HTTP standard. Enabling this feature could make you
-# liable for problems which it causes.
-#
-# ignore-private ignores any ``Cache-control: private''
-# headers received from a server. Doing this VIOLATES
-# the HTTP standard. Enabling this feature could make you
-# liable for problems which it causes.
-#
-# ignore-auth caches responses to requests with authorization,
-# as if the origin server had sent ``Cache-control: public''
-# in the response header. Doing this VIOLATES the HTTP standard.
-# Enabling this feature could make you liable for problems which
-# it causes.
-#
-# refresh-ims causes squid to contact the origin server
-# when a client issues an If-Modified-Since request. This
-# ensures that the client will receive an updated version
-# if one is available.
-#
-# store-stale stores responses even if they don't have explicit
-# freshness or a validator (i.e., Last-Modified or an ETag)
-# present, or if they're already stale. By default, Squid will
-# not cache such responses because they usually can't be
-# reused. Note that such responses will be stale by default.
-#
-# max-stale=NN provide a maximum staleness factor. Squid won't
-# serve objects more stale than this even if it failed to
-# validate the object. Default: use the max_stale global limit.
-#
-# Basically a cached object is:
-#
-# FRESH if expires < now, else STALE
-# STALE if age > max
-# FRESH if lm-factor < percent, else STALE
-# FRESH if age < min
-# else STALE
-#
-# The refresh_pattern lines are checked in the order listed here.
-# The first entry which matches is used. If none of the entries
-# match the default will be used.
-#
-# Note, you must uncomment all the default lines if you want
-# to change one. The default settings are only active if none
-# are specified.
-#
-#
-
-#
-# Add any of your own refresh_pattern entries above these.
-#
-refresh_pattern ^ftp: 1440 20% 10080
-refresh_pattern ^gopher: 1440 0% 1440
-refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
-refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880
-# example line for deb packages
-#refresh_pattern (\.deb|\.udeb)$ 129600 100% 129600
-refresh_pattern . 0 20% 4320
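-#
-# Reading the first entry above as a worked illustration of min/percent/max:
-# FTP objects without an explicit expiry time are treated as fresh for at
-# least 1440 minutes, then for up to 20% of their age, but never longer
-# than 10080 minutes.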
-
-# TAG: quick_abort_min (KB)
-#Default:
-# quick_abort_min 16 KB
-
-# TAG: quick_abort_max (KB)
-#Default:
-# quick_abort_max 16 KB
-
-# TAG: quick_abort_pct (percent)
-# The cache by default continues downloading aborted requests
-# which are almost completed (less than 16 KB remaining). This
-# may be undesirable on slow (e.g. SLIP) links and/or very busy
-# caches. Impatient users may tie up file descriptors and
-# bandwidth by repeatedly requesting and immediately aborting
-# downloads.
-#
-# When the user aborts a request, Squid will check the
-# quick_abort values against the amount of data transferred so
-# far.
-#
-# If the transfer has less than 'quick_abort_min' KB remaining,
-# it will finish the retrieval.
-#
-# If the transfer has more than 'quick_abort_max' KB remaining,
-# it will abort the retrieval.
-#
-# If more than 'quick_abort_pct' of the transfer has completed,
-# it will finish the retrieval.
-#
-# If you do not want any retrieval to continue after the client
-# has aborted, set both 'quick_abort_min' and 'quick_abort_max'
-# to '0 KB'.
-#
-# If you want retrievals to always continue if they are being
-# cached set 'quick_abort_min' to '-1 KB'.
-#Default:
-# quick_abort_pct 95
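-#
-# To illustrate the interaction described above, never continuing an
-# aborted retrieval could be expressed as (values taken from the text
-# above):
-#
-# quick_abort_min 0 KB
-# quick_abort_max 0 KB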
-
-# TAG: read_ahead_gap buffer-size
-# The amount of data the cache will buffer ahead of what has been
-# sent to the client when retrieving an object from another server.
-#Default:
-# read_ahead_gap 16 KB
-
-# TAG: negative_ttl time-units
-# Set the Default Time-to-Live (TTL) for failed requests.
-# Certain types of failures (such as "connection refused" and
-# "404 Not Found") are able to be negatively-cached for a short time.
-# Modern web servers should provide Expires: header, however if they
-# do not this can provide a minimum TTL.
-# The default is not to cache errors with unknown expiry details.
-#
-# Note that this is different from negative caching of DNS lookups.
-#
-# WARNING: Doing this VIOLATES the HTTP standard. Enabling
-# this feature could make you liable for problems which it
-# causes.
-#Default:
-# negative_ttl 0 seconds
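-#
-# A cautious illustrative override (the value is an assumption; note
-# the HTTP-violation warning above):
-#
-# negative_ttl 30 seconds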
-
-# TAG: positive_dns_ttl time-units
-# Upper limit on how long Squid will cache positive DNS responses.
-# Default is 6 hours (360 minutes). This directive must be set
-# larger than negative_dns_ttl.
-#Default:
-# positive_dns_ttl 6 hours
-
-# TAG: negative_dns_ttl time-units
-# Time-to-Live (TTL) for negative caching of failed DNS lookups.
-# This also sets the lower cache limit on positive lookups.
-# Minimum value is 1 second, and it is not recommended to go
-# much below 10 seconds.
-#Default:
-# negative_dns_ttl 1 minutes
-
-# TAG: range_offset_limit size [acl acl...]
-# usage: (size) [units] [[!]aclname]
-#
-# Sets an upper limit on how far (number of bytes) into the file
-# a Range request may be to cause Squid to prefetch the whole file.
-# If beyond this limit, Squid forwards the Range request as it is and
-# the result is NOT cached.
-#
-# This is to stop a far-ahead range request (let's say starting at 17MB)
-# from making Squid fetch the whole object up to that point before
-# sending anything to the client.
-#
-# Multiple range_offset_limit lines may be specified, and they will
-# be searched from top to bottom on each request until a match is found.
-# The first match found will be used. If no line matches a request, the
-# default limit of 0 bytes will be used.
-#
-# 'size' is the limit specified as a number of units.
-#
-# 'units' specifies whether to use bytes, KB, MB, etc.
-# If no units are specified bytes are assumed.
-#
-# A size of 0 causes Squid to never fetch more than the
-# client requested. (default)
-#
-# A size of 'none' causes Squid to always fetch the object from the
-# beginning so it may cache the result. (2.0 style)
-#
-# 'aclname' is the name of a defined ACL.
-#
-# NP: Using 'none' as the byte value here will override any quick_abort settings
-# that may otherwise apply to the range request. The range request will
-# be fully fetched from start to finish regardless of the client
-# actions. This affects bandwidth usage.
-#Default:
-# none
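-#
-# A hypothetical example using the 'none' size together with an ACL
-# (the ACL name and pattern are assumptions for illustration):
-#
-# acl large_iso urlpath_regex -i \.iso$
-# range_offset_limit none large_iso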
-
-# TAG: minimum_expiry_time (seconds)
-# The minimum caching time according to (Expires - Date)
-# headers Squid honors if the object can't be revalidated.
-# The default is 60 seconds.
-#
-# In reverse proxy environments it might be desirable to honor
-# shorter object lifetimes. It is most likely better to make
-# your server return a meaningful Last-Modified header however.
-#
-# In ESI environments where page fragments often have short
-# lifetimes, this will often be best set to 0.
-#Default:
-# minimum_expiry_time 60 seconds
-
-# TAG: store_avg_object_size (bytes)
-# Average object size, used to estimate number of objects your
-# cache can hold. The default is 13 KB.
-#
-# This is used to pre-seed the cache index memory allocation to
-# reduce expensive reallocate operations while handling clients
-# traffic. Too-large values may result in memory allocation during
-# peak traffic, too-small values will result in wasted memory.
-#
-# Check the cache manager 'info' report metrics for the real
-# object sizes seen by your Squid before tuning this.
-#Default:
-# store_avg_object_size 13 KB
-
-# TAG: store_objects_per_bucket
-# Target number of objects per bucket in the store hash table.
-# Lowering this value increases the total number of buckets and
-# also the storage maintenance rate. The default is 20.
-#Default:
-# store_objects_per_bucket 20
-
-# HTTP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: request_header_max_size (KB)
-# This specifies the maximum size for HTTP headers in a request.
-# Request headers are usually relatively small (about 512 bytes).
-# Placing a limit on the request header size will catch certain
-# bugs (for example with persistent connections) and possibly
-# buffer-overflow or denial-of-service attacks.
-#Default:
-# request_header_max_size 64 KB
-
-# TAG: reply_header_max_size (KB)
-# This specifies the maximum size for HTTP headers in a reply.
-# Reply headers are usually relatively small (about 512 bytes).
-# Placing a limit on the reply header size will catch certain
-# bugs (for example with persistent connections) and possibly
-# buffer-overflow or denial-of-service attacks.
-#Default:
-# reply_header_max_size 64 KB
-
-# TAG: request_body_max_size (bytes)
-# This specifies the maximum size for an HTTP request body.
-# In other words, the maximum size of a PUT/POST request.
-# A user who attempts to send a request with a body larger
-# than this limit receives an "Invalid Request" error message.
-# If you set this parameter to a zero (the default), there will
-# be no limit imposed.
-#
-# See also client_request_buffer_max_size for an alternative
-# limitation on client uploads which can be configured.
-#Default:
-# No limit.
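-#
-# An illustrative cap (the size is an assumption, not a recommendation):
-#
-# request_body_max_size 10 MB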
-
-# TAG: client_request_buffer_max_size (bytes)
-# This specifies the maximum buffer size of a client request.
-# It prevents Squid from eating too much memory when somebody uploads
-# a large file.
-#Default:
-# client_request_buffer_max_size 512 KB
-
-# TAG: chunked_request_body_max_size (bytes)
-# A broken or confused HTTP/1.1 client may send a chunked HTTP
-# request to Squid. Squid does not have full support for that
-# feature yet. To cope with such requests, Squid buffers the
-# entire request and then dechunks request body to create a
-# plain HTTP/1.0 request with a known content length. The plain
-# request is then used by the rest of Squid code as usual.
-#
-# The option value specifies the maximum size of the buffer used
-# to hold the request before the conversion. If the chunked
-# request size exceeds the specified limit, the conversion
-# fails, and the client receives an "unsupported request" error,
-# as if dechunking was disabled.
-#
-# Dechunking is enabled by default. To disable conversion of
-# chunked requests, set the maximum to zero.
-#
-# Request dechunking feature and this option in particular are a
-# temporary hack. When chunking requests and responses are fully
-# supported, there will be no need to buffer a chunked request.
-#Default:
-# chunked_request_body_max_size 64 KB
-
-# TAG: broken_posts
-# A list of ACL elements which, if matched, causes Squid to send
-# an extra CRLF pair after the body of a PUT/POST request.
-#
-# Some HTTP servers have broken implementations of PUT/POST,
-# and rely on an extra CRLF pair sent by some WWW clients.
-#
-# Quote from RFC2616 section 4.1 on this matter:
-#
-# Note: certain buggy HTTP/1.0 client implementations generate
-# extra CRLF's after a POST request. To restate what is explicitly
-# forbidden by the BNF, an HTTP/1.1 client must not preface or follow
-# a request with an extra CRLF.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-#Example:
-# acl buggy_server url_regex ^http://....
-# broken_posts allow buggy_server
-#Default:
-# Obey RFC 2616.
-
-# TAG: adaptation_uses_indirect_client on|off
-# Controls whether the indirect client IP address (instead of the direct
-# client IP address) is passed to adaptation services.
-#
-# See also: follow_x_forwarded_for adaptation_send_client_ip
-#Default:
-# adaptation_uses_indirect_client on
-
-# TAG: via on|off
-# If set (default), Squid will include a Via header in requests and
-# replies as required by RFC2616.
-#Default:
-# via on
-
-# TAG: ie_refresh on|off
-# Microsoft Internet Explorer up until version 5.5 Service
-# Pack 1 has an issue with transparent proxies, wherein it
-# is impossible to force a refresh. Turning this on provides
-# a partial fix to the problem, by causing all IMS-REFRESH
-# requests from older IE versions to check the origin server
-# for fresh content. This reduces hit ratio by some amount
-# (~10% in my experience), but allows users to actually get
-# fresh content when they want it. Note because Squid
-# cannot tell if the user is using 5.5 or 5.5SP1, the behavior
-# of 5.5 is unchanged from old versions of Squid (i.e. a
-# forced refresh is impossible). Newer versions of IE will,
-# hopefully, continue to have the new behavior and will be
-# handled based on that assumption. This option defaults to
-# the old Squid behavior, which is better for hit ratios but
-# worse for clients using IE, if they need to be able to
-# force fresh content.
-#Default:
-# ie_refresh off
-
-# TAG: vary_ignore_expire on|off
-# Many HTTP servers supporting Vary give such objects
-# an immediate expiry time with no cache-control header
-# when requested by an HTTP/1.0 client. This option
-# enables Squid to ignore such expiry times until
-# HTTP/1.1 is fully implemented.
-#
-# WARNING: If turned on this may eventually cause some
-# varying objects not intended for caching to get cached.
-#Default:
-# vary_ignore_expire off
-
-# TAG: request_entities
-# Squid defaults to deny GET and HEAD requests with request entities,
-# as the meaning of such requests is undefined in the HTTP standard
-# even if not explicitly forbidden.
-#
-# Set this directive to on if you have clients which insist
-# on sending request entities in GET or HEAD requests. But be warned
-# that there is server software (both proxies and web servers) which
-# can fail to properly process this kind of request, which may make you
-# vulnerable to cache pollution attacks if enabled.
-#Default:
-# request_entities off
-
-# TAG: request_header_access
-# Usage: request_header_access header_name allow|deny [!]aclname ...
-#
-# WARNING: Doing this VIOLATES the HTTP standard. Enabling
-# this feature could make you liable for problems which it
-# causes.
-#
-# This option replaces the old 'anonymize_headers' and the
-# older 'http_anonymizer' option with something that is much
-# more configurable. A list of ACLs for each header name allows
-# removal of specific header fields under specific conditions.
-#
-# This option only applies to outgoing HTTP request headers (i.e.,
-# headers sent by Squid to the next HTTP hop such as a cache peer
-# or an origin server). The option has no effect during cache hit
-# detection. The equivalent adaptation vectoring point in ICAP
-# terminology is post-cache REQMOD.
-#
-# The option is applied to individual outgoing request header
-# fields. For each request header field F, Squid uses the first
-# qualifying sets of request_header_access rules:
-#
-# 1. Rules with header_name equal to F's name.
-# 2. Rules with header_name 'Other', provided F's name is not
-# on the hard-coded list of commonly used HTTP header names.
-# 3. Rules with header_name 'All'.
-#
-# Within that qualifying rule set, rule ACLs are checked as usual.
-# If ACLs of an "allow" rule match, the header field is allowed to
-# go through as is. If ACLs of a "deny" rule match, the header is
-# removed and request_header_replace is then checked to identify
-# if the removed header has a replacement. If no rules within the
-# set have matching ACLs, the header field is left as is.
-#
-# For example, to achieve the same behavior as the old
-# 'http_anonymizer standard' option, you should use:
-#
-# request_header_access From deny all
-# request_header_access Referer deny all
-# request_header_access User-Agent deny all
-#
-# Or, to reproduce the old 'http_anonymizer paranoid' feature
-# you should use:
-#
-# request_header_access Authorization allow all
-# request_header_access Proxy-Authorization allow all
-# request_header_access Cache-Control allow all
-# request_header_access Content-Length allow all
-# request_header_access Content-Type allow all
-# request_header_access Date allow all
-# request_header_access Host allow all
-# request_header_access If-Modified-Since allow all
-# request_header_access Pragma allow all
-# request_header_access Accept allow all
-# request_header_access Accept-Charset allow all
-# request_header_access Accept-Encoding allow all
-# request_header_access Accept-Language allow all
-# request_header_access Connection allow all
-# request_header_access All deny all
-#
-# HTTP reply headers are controlled with the reply_header_access directive.
-#
-# By default, all headers are allowed (no anonymizing is performed).
-#Default:
-# No limits.
-
-# TAG: reply_header_access
-# Usage: reply_header_access header_name allow|deny [!]aclname ...
-#
-# WARNING: Doing this VIOLATES the HTTP standard. Enabling
-# this feature could make you liable for problems which it
-# causes.
-#
-# This option only applies to reply headers, i.e., from the
-# server to the client.
-#
-# This is the same as request_header_access, but in the other
-# direction. Please see request_header_access for detailed
-# documentation.
-#
-# For example, to achieve the same behavior as the old
-# 'http_anonymizer standard' option, you should use:
-#
-# reply_header_access Server deny all
-# reply_header_access WWW-Authenticate deny all
-# reply_header_access Link deny all
-#
-# Or, to reproduce the old 'http_anonymizer paranoid' feature
-# you should use:
-#
-# reply_header_access Allow allow all
-# reply_header_access WWW-Authenticate allow all
-# reply_header_access Proxy-Authenticate allow all
-# reply_header_access Cache-Control allow all
-# reply_header_access Content-Encoding allow all
-# reply_header_access Content-Length allow all
-# reply_header_access Content-Type allow all
-# reply_header_access Date allow all
-# reply_header_access Expires allow all
-# reply_header_access Last-Modified allow all
-# reply_header_access Location allow all
-# reply_header_access Pragma allow all
-# reply_header_access Content-Language allow all
-# reply_header_access Retry-After allow all
-# reply_header_access Title allow all
-# reply_header_access Content-Disposition allow all
-# reply_header_access Connection allow all
-# reply_header_access All deny all
-#
-# HTTP request headers are controlled with the request_header_access directive.
-#
-# By default, all headers are allowed (no anonymizing is
-# performed).
-#Default:
-# No limits.
-
-# TAG: request_header_replace
-# Usage: request_header_replace header_name message
-# Example: request_header_replace User-Agent Nutscrape/1.0 (CP/M; 8-bit)
-#
-# This option allows you to change the contents of headers
-# denied with request_header_access above, by replacing them
-# with some fixed string.
-#
-# This only applies to request headers, not reply headers.
-#
-# By default, headers are removed if denied.
-#Default:
-# none
-
-# TAG: reply_header_replace
-# Usage: reply_header_replace header_name message
-# Example: reply_header_replace Server Foo/1.0
-#
-# This option allows you to change the contents of headers
-# denied with reply_header_access above, by replacing them
-# with some fixed string.
-#
-# This only applies to reply headers, not request headers.
-#
-# By default, headers are removed if denied.
-#Default:
-# none
-
-# TAG: request_header_add
-# Usage: request_header_add field-name field-value acl1 [acl2] ...
-# Example: request_header_add X-Client-CA "CA=%ssl::>cert_issuer" all
-#
-# This option adds header fields to outgoing HTTP requests (i.e.,
-# request headers sent by Squid to the next HTTP hop such as a
-# cache peer or an origin server). The option has no effect during
-# cache hit detection. The equivalent adaptation vectoring point
-# in ICAP terminology is post-cache REQMOD.
-#
-# Field-name is a token specifying an HTTP header name. If a
-# standard HTTP header name is used, Squid does not check whether
-# the new header conflicts with any existing headers or violates
-# HTTP rules. If the request to be modified already contains a
-# field with the same name, the old field is preserved but the
-# header field values are not merged.
-#
-# Field-value is either a token or a quoted string. If quoted
-# string format is used, then the surrounding quotes are removed
-# while escape sequences and %macros are processed.
-#
-# In theory, all of the logformat codes can be used as %macros.
-# However, unlike logging (which happens at the very end of
-# transaction lifetime), the transaction may not yet have enough
-# information to expand a macro when the new header value is needed.
-# And some information may already be available to Squid but not yet
-# committed where the macro expansion code can access it (report
-# such instances!). The macro will be expanded into a single dash
-# ('-') in such cases. Not all macros have been tested.
-#
-# One or more Squid ACLs may be specified to restrict header
-# injection to matching requests. As always in squid.conf, all
-# ACLs in an option ACL list must be satisfied for the insertion
-# to happen. The request_header_add option supports fast ACLs
-# only.
-#Default:
-# none
-
-# TAG: relaxed_header_parser on|off|warn
-# In the default "on" setting Squid accepts certain forms
-# of non-compliant HTTP messages where it is unambiguous
-# what the sending application intended even if the message
-# is not correctly formatted. The message is then normalized
-# to the correct form when forwarded by Squid.
-#
-# If set to "warn" then a warning will be emitted in cache.log
-# each time such an HTTP error is encountered.
-#
-# If set to "off" then such HTTP errors will cause the request
-# or response to be rejected.
-#Default:
-# relaxed_header_parser on
-
-# TIMEOUTS
-# -----------------------------------------------------------------------------
-
-# TAG: forward_timeout time-units
-# This parameter specifies how long, at most, Squid should spend
-# finding a forwarding path for the request before giving up.
-#Default:
-# forward_timeout 4 minutes
-
-# TAG: connect_timeout time-units
-# This parameter specifies how long to wait for the TCP connect to
-# the requested server or peer to complete before Squid should
-# attempt to find another path on which to forward the request.
-#Default:
-# connect_timeout 1 minute
-
-# TAG: peer_connect_timeout time-units
-# This parameter specifies how long to wait for a pending TCP
-# connection to a peer cache. The default is 30 seconds. You
-# may also set different timeout values for individual neighbors
-# with the 'connect-timeout' option on a 'cache_peer' line.
-#Default:
-# peer_connect_timeout 30 seconds
-
-# TAG: read_timeout time-units
-# The read_timeout is applied on server-side connections. After
-# each successful read(), the timeout will be extended by this
-# amount. If no data is read again after this amount of time,
-# the request is aborted and logged with ERR_READ_TIMEOUT. The
-# default is 15 minutes.
-#Default:
-# read_timeout 15 minutes
-
-# TAG: write_timeout time-units
-# This timeout is tracked for all connections that have data
-# available for writing and are waiting for the socket to become
-# ready. After each successful write, the timeout is extended by
-# the configured amount. If Squid has data to write but the
-# connection is not ready for the configured duration, the
-# transaction associated with the connection is terminated. The
-# default is 15 minutes.
-#Default:
-# write_timeout 15 minutes
-
-# TAG: request_timeout
-# How long to wait for complete HTTP request headers after initial
-# connection establishment.
-#Default:
-# request_timeout 5 minutes
-
-# TAG: client_idle_pconn_timeout
-# How long to wait for the next HTTP request on a persistent
-# client connection after the previous request completes.
-#Default:
-# client_idle_pconn_timeout 2 minutes
-
-# TAG: client_lifetime time-units
-# The maximum amount of time a client (browser) is allowed to
-# remain connected to the cache process. This protects the Cache
-# from having a lot of sockets (and hence file descriptors) tied up
-# in a CLOSE_WAIT state from remote clients that go away without
-# properly shutting down (either because of a network failure or
-# because of a poor client implementation). The default is one
-# day, 1440 minutes.
-#
-# NOTE: The default value is intended to be much larger than any
-# client would ever need to be connected to your cache. You
-# should probably change client_lifetime only as a last resort.
-# If you seem to have many client connections tying up
-# filedescriptors, we recommend first tuning the read_timeout,
-# request_timeout, persistent_request_timeout and quick_abort values.
-#Default:
-# client_lifetime 1 day
-
-# TAG: half_closed_clients
-# Some clients may shut down the sending side of their TCP
-# connections, while leaving their receiving sides open. Sometimes,
-# Squid can not tell the difference between a half-closed and a
-# fully-closed TCP connection.
-#
-# By default, Squid will immediately close client connections when
-# read(2) returns "no more data to read."
-#
-# Change this option to 'on' and Squid will keep open connections
-# until a read(2) or write(2) on the socket returns an error.
-# This may show some benefits for reverse proxies. But if not,
-# it is recommended to leave it OFF.
-#Default:
-# half_closed_clients off
-
-# TAG: server_idle_pconn_timeout
-# Timeout for idle persistent connections to servers and other
-# proxies.
-#Default:
-# server_idle_pconn_timeout 1 minute
-
-# TAG: ident_timeout
-# Maximum time to wait for IDENT lookups to complete.
-#
-# If this is too high, and you enabled IDENT lookups from untrusted
-# users, you might be susceptible to denial-of-service by having
-# many ident requests going at once.
-#Default:
-# ident_timeout 10 seconds
-
-# TAG: shutdown_lifetime time-units
-# When SIGTERM or SIGHUP is received, the cache is put into
-# "shutdown pending" mode until all active sockets are closed.
-# This value is the lifetime to set for all open descriptors
-# during shutdown mode. Any active clients after this many
-# seconds will receive a 'timeout' message.
-#Default:
-# shutdown_lifetime 30 seconds
-
-# ADMINISTRATIVE PARAMETERS
-# -----------------------------------------------------------------------------
-
-# TAG: cache_mgr
-# Email-address of local cache manager who will receive
-# mail if the cache dies. The default is "webmaster".
-#Default:
-# cache_mgr webmaster
-
-# TAG: mail_from
-# From: email-address for mail sent when the cache dies.
-# The default is to use 'squid@unique_hostname'.
-#
-# See also: unique_hostname directive.
-#Default:
-# none
-
-# TAG: mail_program
-# Email program used to send mail if the cache dies.
-# The default is "mail". The specified program must comply
-# with the standard Unix mail syntax:
-# mail-program recipient < mailfile
-#
-# Optional command line options can be specified.
-#Default:
-# mail_program mail
-
-# TAG: cache_effective_user
-# If you start Squid as root, it will change its effective/real
-# UID/GID to the user specified below. The default is to change
-# to the UID of 'proxy'.
-# See also: cache_effective_group
-#Default:
-# cache_effective_user proxy
-
-# TAG: cache_effective_group
-# Squid sets the GID to the effective user's default group ID
-# (taken from the password file) and the supplementary group list
-# from its group memberships.
-#
-# If you want Squid to run with a specific GID regardless of
-# the group memberships of the effective user then set this
-# to the group (or GID) you want Squid to run as. When set
-# all other group privileges of the effective user are ignored
-# and only this GID is effective. If Squid is not started as
-# root the user starting Squid MUST be member of the specified
-# group.
-#
-# This option is not recommended by the Squid Team.
-# Our preference is for administrators to configure a secure
-# user account for squid with UID/GID matching system policies.
-#Default:
-# Use system group memberships of the cache_effective_user account
-
-# TAG: httpd_suppress_version_string on|off
-# Suppress Squid version string info in HTTP headers and HTML error pages.
-#Default:
-# httpd_suppress_version_string off
-
-# TAG: visible_hostname
-# If you want to present a special hostname in error messages, etc,
-# define this. Otherwise, the return value of gethostname()
-# will be used. If you have multiple caches in a cluster and
-# get errors about IP-forwarding you must set them to have individual
-# names with this setting.
-#Default:
-# Automatically detect the system host name
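-#
-# A hypothetical example (the hostname is a placeholder):
-#
-# visible_hostname proxy.example.com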
-
-# TAG: unique_hostname
-# If you want to have multiple machines with the same
-# 'visible_hostname' you must give each machine a different
-# 'unique_hostname' so forwarding loops can be detected.
-#Default:
-# Copy the value from visible_hostname
-
-# TAG: hostname_aliases
-# A list of other DNS names your cache has.
-#Default:
-# none
-
-# TAG: umask
-# Minimum umask which should be enforced while the proxy
-# is running, in addition to the umask set at startup.
-#
-# For a traditional octal representation of umasks, start
-# your value with 0.
-#Default:
-# umask 027
-
-# OPTIONS FOR THE CACHE REGISTRATION SERVICE
-# -----------------------------------------------------------------------------
-#
-# This section contains parameters for the (optional) cache
-# announcement service. This service is provided to help
-# cache administrators locate one another in order to join or
-# create cache hierarchies.
-#
-# An 'announcement' message is sent (via UDP) to the registration
-# service by Squid. By default, the announcement message is NOT
-# SENT unless you enable it with 'announce_period' below.
-#
-# The announcement message includes your hostname, plus the
-# following information from this configuration file:
-#
-# http_port
-# icp_port
-# cache_mgr
-#
-# All current information is processed regularly and made
-# available on the Web at http://www.ircache.net/Cache/Tracker/.
-
-# TAG: announce_period
-# This is how frequently to send cache announcements.
-#
-# To enable announcing your cache, just set an announce period.
-#
-# Example:
-# announce_period 1 day
-#Default:
-# Announcement messages disabled.
-
-# TAG: announce_host
-# Set the hostname where announce registration messages will be sent.
-#
-# See also announce_port and announce_file
-#Default:
-# announce_host tracker.ircache.net
-
-# TAG: announce_file
-# The contents of this file will be included in the announce
-# registration messages.
-#Default:
-# none
-
-# TAG: announce_port
-# Set the port where announce registration messages will be sent.
-#
-# See also announce_host and announce_file
-#Default:
-# announce_port 3131
-
-# HTTPD-ACCELERATOR OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: httpd_accel_surrogate_id
-# Surrogates (http://www.esi.org/architecture_spec_1.0.html)
-# need an identification token to allow control targeting. Because
-# a farm of surrogates may all perform the same tasks, they may share
-# an identification token.
-#Default:
-# visible_hostname is used if no specific ID is set.
-
-# TAG: http_accel_surrogate_remote on|off
-# Remote surrogates (such as those in a CDN) honour the header
-# "Surrogate-Control: no-store-remote".
-#
-# Set this to on to have squid behave as a remote surrogate.
-#Default:
-# http_accel_surrogate_remote off
-
-# TAG: esi_parser libxml2|expat|custom
-# ESI markup is not strictly XML compatible. The custom ESI parser
-# will give higher performance, but cannot handle non ASCII character
-# encodings.
-#Default:
-# esi_parser custom
-
-# DELAY POOL PARAMETERS
-# -----------------------------------------------------------------------------
-
-# TAG: delay_pools
-# This represents the number of delay pools to be used. For example,
-# if you have one class 2 delay pool and one class 3 delay pool, you
-# have a total of 2 delay pools.
-#
-# See also delay_parameters, delay_class, delay_access for pool
-# configuration details.
-#Default:
-# delay_pools 0
-
-# TAG: delay_class
-# This defines the class of each delay pool. There must be exactly one
-# delay_class line for each delay pool. For example, to define two
-# delay pools, one of class 2 and one of class 3, the settings above
-# and here would be:
-#
-# Example:
-# delay_pools 4 # 4 delay pools
-# delay_class 1 2 # pool 1 is a class 2 pool
-# delay_class 2 3 # pool 2 is a class 3 pool
-# delay_class 3 4 # pool 3 is a class 4 pool
-# delay_class 4 5 # pool 4 is a class 5 pool
-#
-# The delay pool classes are:
-#
-# class 1 Everything is limited by a single aggregate
-# bucket.
-#
-# class 2 Everything is limited by a single aggregate
-# bucket as well as an "individual" bucket chosen
-# from bits 25 through 32 of the IPv4 address.
-#
-# class 3 Everything is limited by a single aggregate
-# bucket as well as a "network" bucket chosen
-# from bits 17 through 24 of the IPv4 address and an
-# "individual" bucket chosen from bits 17 through
-# 32 of the IPv4 address.
-#
-# class 4 Everything in a class 3 delay pool, with an
-# additional limit on a per user basis. This
-# only takes effect if the username is established
-# in advance - by forcing authentication in your
-# http_access rules.
-#
-# class 5 Requests are grouped according to their tag (see
-# external_acl's tag= reply).
-#
-#
-# Each pool also requires a delay_parameters directive to configure the pool size
-# and speed limits used whenever the pool is applied to a request, along with
-# a set of delay_access directives to determine when it is used.
-#
-# NOTE: If an IP address is a.b.c.d
-# -> bits 25 through 32 are "d"
-# -> bits 17 through 24 are "c"
-# -> bits 17 through 32 are "c * 256 + d"
-#
-# NOTE-2: Due to the use of bitmasks in class 2, 3 and 4 pools, they only apply to
-# IPv4 traffic. Class 1 and 5 pools may be used with IPv6 traffic.
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-# See also delay_parameters and delay_access.
-#Default:
-# none
-
-# TAG: delay_access
-# This is used to determine which delay pool a request falls into.
-#
-# delay_access is sorted per pool and the matching starts with pool 1,
-# then pool 2, ..., and finally pool N. The first delay pool where the
-# request is allowed is selected for the request. If it does not allow
-# the request to any pool then the request is not delayed (default).
-#
-# For example, if you want some_big_clients in delay
-# pool 1 and lotsa_little_clients in delay pool 2:
-#
-# delay_access 1 allow some_big_clients
-# delay_access 1 deny all
-# delay_access 2 allow lotsa_little_clients
-# delay_access 2 deny all
-# delay_access 3 allow authenticated_clients
-#
-# See also delay_parameters and delay_class.
-#
-#Default:
-# Deny using the pool, unless allow rules exist in squid.conf for the pool.
-
-# TAG: delay_parameters
-# This defines the parameters for a delay pool. Each delay pool has
-# a number of "buckets" associated with it, as explained in the
-# description of delay_class.
-#
-# For a class 1 delay pool, the syntax is:
-# delay_pools pool 1
-# delay_parameters pool aggregate
-#
-# For a class 2 delay pool:
-# delay_pools pool 2
-# delay_parameters pool aggregate individual
-#
-# For a class 3 delay pool:
-# delay_pools pool 3
-# delay_parameters pool aggregate network individual
-#
-# For a class 4 delay pool:
-# delay_pools pool 4
-# delay_parameters pool aggregate network individual user
-#
-# For a class 5 delay pool:
-# delay_pools pool 5
-# delay_parameters pool tagrate
-#
-# The option variables are:
-#
-# pool a pool number - ie, a number between 1 and the
-# number specified in delay_pools as used in
-# delay_class lines.
-#
-# aggregate the speed limit parameters for the aggregate bucket
-# (class 1, 2, 3).
-#
-# individual the speed limit parameters for the individual
-# buckets (class 2, 3).
-#
-# network the speed limit parameters for the network buckets
-# (class 3).
-#
-# user the speed limit parameters for the user buckets
-# (class 4).
-#
-# tagrate the speed limit parameters for the tag buckets
-# (class 5).
-#
-# A pair of delay parameters is written restore/maximum, where restore is
-# the number of bytes (not bits - modem and network speeds are usually
-# quoted in bits) per second placed into the bucket, and maximum is the
-# maximum number of bytes which can be in the bucket at any time.
-#
-# There must be one delay_parameters line for each delay pool.
-#
-#
-# For example, if delay pool number 1 is a class 2 delay pool as in the
-# above example, and is being used to strictly limit each host to 64Kbit/sec
-# (plus overheads), with no overall limit, the line is:
-#
-# delay_parameters 1 -1/-1 8000/8000
-#
-# Note that 8 x 8000 Byte/sec -> 64Kbit/sec.
-#
-# Note that the figure -1 is used to represent "unlimited".
-#
-#
-# And, if delay pool number 2 is a class 3 delay pool as in the above
-# example, and you want to limit it to a total of 256Kbit/sec (strict limit)
-# with each 8-bit network permitted 64Kbit/sec (strict limit) and each
-# individual host permitted 4800bit/sec with a bucket maximum size of 64Kbits
-# to permit a decent web page to be downloaded at a decent speed
-# (if the network is not being limited due to overuse) but slow down
-# large downloads more significantly:
-#
-# delay_parameters 2 32000/32000 8000/8000 600/8000
-#
-# Note that 8 x 32000 Byte/sec -> 256Kbit/sec.
-# 8 x 8000 Byte/sec -> 64Kbit/sec.
-# 8 x 600 Byte/sec -> 4800bit/sec.
-#
-#
-# Finally, for a class 4 delay pool as in the example, each user will
-# be limited to 128Kbit/sec no matter how many workstations they are logged into:
-#
-# delay_parameters 4 32000/32000 8000/8000 600/64000 16000/16000
-#
-#
-# See also delay_class and delay_access.
-#
-#Default:
-# none
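-#
-# Pulling the pieces above together, a minimal class 2 pool limiting a
-# hypothetical source network to roughly 64Kbit/sec per host (the ACL
-# name and address range are assumptions) might look like:
-#
-# acl limited_net src 192.0.2.0/24
-# delay_pools 1
-# delay_class 1 2
-# delay_parameters 1 -1/-1 8000/8000
-# delay_access 1 allow limited_net
-# delay_access 1 deny all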
-
-# TAG: delay_initial_bucket_level (percent, 0-100)
-# The initial bucket percentage is used to determine how much is put
-# in each bucket when squid starts, is reconfigured, or first notices
-# a host accessing it (in class 2 and class 3, individual hosts and
-# networks only have buckets associated with them once they have been
-# "seen" by squid).
-#Default:
-# delay_initial_bucket_level 50
-
-# CLIENT DELAY POOL PARAMETERS
-# -----------------------------------------------------------------------------
-
-# TAG: client_delay_pools
-# This option specifies the number of client delay pools used. It must
-# precede other client_delay_* options.
-#
-# Example:
-# client_delay_pools 2
-#
-# See also client_delay_parameters and client_delay_access.
-#Default:
-# client_delay_pools 0
-
-# TAG: client_delay_initial_bucket_level (percent, 0-no_limit)
-# This option determines the initial bucket size as a percentage of
-# max_bucket_size from client_delay_parameters. Buckets are created
-# at the time of the "first" connection from the matching IP. Idle
-# buckets are periodically deleted.
-#
-# You can specify more than 100 percent but note that such "oversized"
-# buckets are not refilled until their size goes down to max_bucket_size
-# from client_delay_parameters.
-#
-# Example:
-# client_delay_initial_bucket_level 50
-#Default:
-# client_delay_initial_bucket_level 50
-
-# TAG: client_delay_parameters
-#
-# This option configures client-side bandwidth limits using the
-# following format:
-#
-# client_delay_parameters pool speed_limit max_bucket_size
-#
-# pool is an integer ID used for client_delay_access matching.
-#
-# speed_limit is bytes added to the bucket per second.
-#
-# max_bucket_size is the maximum size of a bucket, enforced after any
-# speed_limit additions.
-#
-# Please see the delay_parameters option for more information and
-# examples.
-#
-# Example:
-# client_delay_parameters 1 1024 2048
-# client_delay_parameters 2 51200 16384
-#
-# See also client_delay_access.
-#
-#Default:
-# none
-
-# TAG: client_delay_access
-# This option determines the client-side delay pool for the
-# request:
-#
-# client_delay_access pool_ID allow|deny acl_name
-#
-# All client_delay_access options are checked in their pool ID
-# order, starting with pool 1. The first checked pool that allows the
-# request is selected for the request. If no ACL matches or there
-# are no client_delay_access options, the request bandwidth is not
-# limited.
-#
-# The ACL-selected pool is then used to find the
-# client_delay_parameters for the request. Client-side pools are
-# not used to aggregate clients. Clients are always aggregated
-# based on their source IP addresses (one bucket per source IP).
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-# Additionally, only the client TCP connection details are available.
-# ACLs testing HTTP properties will not work.
-#
-# Please see delay_access for more examples.
-#
-# Example:
-# client_delay_access 1 allow low_rate_network
-# client_delay_access 2 allow vips_network
-#
-#
-# See also client_delay_parameters and client_delay_pools.
-#Default:
-# Deny use of the pool, unless allow rules exist in squid.conf for the pool.
-
-# WCCPv1 AND WCCPv2 CONFIGURATION OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: wccp_router
-# Use this option to define your WCCP ``home'' router for
-# Squid.
-#
-# wccp_router supports a single WCCP(v1) router
-#
-# wccp2_router supports multiple WCCPv2 routers
-#
-# Only one of the two may be used at the same time, and the one
-# used defines which version of WCCP to use.
-#Default:
-# WCCP disabled.
-
-# TAG: wccp2_router
-# Use this option to define your WCCP ``home'' router for
-# Squid.
-#
-# wccp_router supports a single WCCP(v1) router
-#
-# wccp2_router supports multiple WCCPv2 routers
-#
-# Only one of the two may be used at the same time, and the one
-# used defines which version of WCCP to use.
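-#
-# Illustrative example (router addresses are hypothetical):
-# wccp2_router 192.0.2.1
-# wccp2_router 192.0.2.2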
-#Default:
-# WCCPv2 disabled.
-
-# TAG: wccp_version
-# This directive is only relevant if you need to set up WCCP(v1)
-# to some very old and end-of-life Cisco routers. In all other
-# setups it must be left unset or at the default setting.
-# It defines an internal version in the WCCP(v1) protocol,
-# with version 4 being the officially documented protocol.
-#
-# According to some users, Cisco IOS 11.2 and earlier only
-# support WCCP version 3. If you're using that or an earlier
-# version of IOS, you may need to change this value to 3, otherwise
-# do not specify this parameter.
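-#
-# Illustrative example, only for such legacy IOS releases:
-# wccp_version 3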
-#Default:
-# wccp_version 4
-
-# TAG: wccp2_rebuild_wait
-# If this is enabled Squid will wait for the cache dir rebuild to finish
-# before sending the first wccp2 HereIAm packet
-#Default:
-# wccp2_rebuild_wait on
-
-# TAG: wccp2_forwarding_method
-# WCCP2 allows the setting of forwarding methods between the
-# router/switch and the cache. Valid values are as follows:
-#
-# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
-# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting)
-#
-# Currently (as of IOS 12.4) cisco routers only support GRE.
-# Cisco switches only support the L2 redirect assignment method.
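-#
-# Illustrative example for a switch-based deployment:
-# wccp2_forwarding_method l2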
-#Default:
-# wccp2_forwarding_method gre
-
-# TAG: wccp2_return_method
-# WCCP2 allows the setting of return methods between the
-# router/switch and the cache for packets that the cache
-# decides not to handle. Valid values are as follows:
-#
-# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
-# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting)
-#
-# Currently (as of IOS 12.4) cisco routers only support GRE.
-# Cisco switches only support the L2 redirect assignment.
-#
-# If the "ip wccp redirect exclude in" command has been
-# enabled on the cache interface, then it is still safe for
-# the proxy server to use a l2 redirect method even if this
-# option is set to GRE.
-#Default:
-# wccp2_return_method gre
-
-# TAG: wccp2_assignment_method
-# WCCP2 allows the setting of methods to assign the WCCP hash
-# Valid values are as follows:
-#
-# hash - Hash assignment
-# mask - Mask assignment
-#
-# As a general rule, cisco routers support the hash assignment method
-# and cisco switches support the mask assignment method.
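-#
-# Illustrative example for a switch-based deployment:
-# wccp2_assignment_method mask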
-#Default:
-# wccp2_assignment_method hash
-
-# TAG: wccp2_service
-# WCCP2 allows for multiple traffic services. There are two
-# types: "standard" and "dynamic". The standard type defines
-# one service id - http (id 0). The dynamic service ids can be from
-# 51 to 255 inclusive. In order to use a dynamic service id
-# one must define the type of traffic to be redirected; this is done
-# using the wccp2_service_info option.
-#
-# The "standard" type does not require a wccp2_service_info option,
-# just specifying the service id will suffice.
-#
-# MD5 service authentication can be enabled by adding
-# "password=<password>" to the end of this service declaration.
-#
-# Examples:
-#
-# wccp2_service standard 0 # for the 'web-cache' standard service
-# wccp2_service dynamic 80 # a dynamic service type which will be
-# # fleshed out with subsequent options.
-# wccp2_service standard 0 password=foo
-#Default:
-# Use the 'web-cache' standard service.
-
-# TAG: wccp2_service_info
-# Dynamic WCCPv2 services require further information to define the
-# traffic you wish to have diverted.
-#
-# The format is:
-#
-# wccp2_service_info <id> protocol=<protocol> flags=<flag>,<flag>..
-# priority=<priority> ports=<port>,<port>..
-#
-# The relevant WCCPv2 flags:
-# + src_ip_hash, dst_ip_hash
-# + source_port_hash, dst_port_hash
-# + src_ip_alt_hash, dst_ip_alt_hash
-# + src_port_alt_hash, dst_port_alt_hash
-# + ports_source
-#
-# The port list can be one to eight entries.
-#
-# Example:
-#
-# wccp2_service_info 80 protocol=tcp flags=src_ip_hash,ports_source
-# priority=240 ports=80
-#
-# Note: the service id must have been defined by a previous
-# 'wccp2_service dynamic <id>' entry.
-#Default:
-# none
-
-# TAG: wccp2_weight
-# Each cache server gets assigned a set of the destination
-# hash proportional to its weight.
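-#
-# Illustrative example giving this cache twice the default weight:
-# wccp2_weight 20000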
-#Default:
-# wccp2_weight 10000
-
-# TAG: wccp_address
-# Use this option if you require WCCP(v1) to use a specific
-# interface address.
-#
-# The default behavior is to not bind to any specific address.
-#Default:
-# Address selected by the operating system.
-
-# TAG: wccp2_address
-# Use this option if you require WCCPv2 to use a specific
-# interface address.
-#
-# The default behavior is to not bind to any specific address.
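-#
-# Illustrative example (the address is hypothetical):
-# wccp2_address 192.0.2.10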
-#Default:
-# Address selected by the operating system.
-
-# PERSISTENT CONNECTION HANDLING
-# -----------------------------------------------------------------------------
-#
-# Also see "pconn_timeout" in the TIMEOUTS section
-
-# TAG: client_persistent_connections
-# Persistent connection support for clients.
-# Squid uses persistent connections (when allowed). You can use
-# this option to disable persistent connections with clients.
-#Default:
-# client_persistent_connections on
-
-# TAG: server_persistent_connections
-# Persistent connection support for servers.
-# Squid uses persistent connections (when allowed). You can use
-# this option to disable persistent connections with servers.
-#Default:
-# server_persistent_connections on
-
-# TAG: persistent_connection_after_error
-# With this directive the use of persistent connections after
-# HTTP errors can be disabled. Useful if you have clients
-# who fail to handle errors on persistent connections properly.
-#Default:
-# persistent_connection_after_error on
-
-# TAG: detect_broken_pconn
-# Some servers have been found to incorrectly signal the use
-# of HTTP/1.0 persistent connections even on replies that are
-# not compatible, causing significant delays. This server problem
-# has mostly been seen on redirects.
-#
-# By enabling this directive Squid attempts to detect such
-# broken replies and automatically assume the reply is finished
-# after 10 seconds timeout.
-#Default:
-# detect_broken_pconn off
-
-# CACHE DIGEST OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: digest_generation
-# This controls whether the server will generate a Cache Digest
-# of its contents. By default, Cache Digest generation is
-# enabled if Squid is compiled with --enable-cache-digests defined.
-#Default:
-# digest_generation on
-
-# TAG: digest_bits_per_entry
-# This is the number of bits of the server's Cache Digest which
-# will be associated with the Digest entry for a given HTTP
-# Method and URL (public key) combination. The default is 5.
-#Default:
-# digest_bits_per_entry 5
-
-# TAG: digest_rebuild_period (seconds)
-# This is the wait time between Cache Digest rebuilds.
-#Default:
-# digest_rebuild_period 1 hour
-
-# TAG: digest_rewrite_period (seconds)
-# This is the wait time between Cache Digest writes to
-# disk.
-#Default:
-# digest_rewrite_period 1 hour
-
-# TAG: digest_swapout_chunk_size (bytes)
-# This is the number of bytes of the Cache Digest to write to
-# disk at a time. It defaults to 4096 bytes (4KB), the Squid
-# default swap page.
-#Default:
-# digest_swapout_chunk_size 4096 bytes
-
-# TAG: digest_rebuild_chunk_percentage (percent, 0-100)
-# This is the percentage of the Cache Digest to be scanned at a
-# time. By default it is set to 10% of the Cache Digest.
-#Default:
-# digest_rebuild_chunk_percentage 10
-
-# SNMP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: snmp_port
-# The port number where Squid listens for SNMP requests. To enable
-# SNMP support set this to a suitable port number. Port number
-# 3401 is often used for the Squid SNMP agent. By default it's
-# set to "0" (disabled)
-#
-# Example:
-# snmp_port 3401
-#Default:
-# SNMP disabled.
-
-# TAG: snmp_access
-# Allowing or denying access to the SNMP port.
-#
-# All access to the agent is denied by default.
-# usage:
-#
-# snmp_access allow|deny [!]aclname ...
-#
-# This clause only supports fast acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#
-#Example:
-# snmp_access allow snmppublic localhost
-# snmp_access deny all
-#Default:
-# Deny, unless rules exist in squid.conf.
-
-# TAG: snmp_incoming_address
-# Just like 'udp_incoming_address', but for the SNMP port.
-#
-# snmp_incoming_address is used for the SNMP socket receiving
-# messages from SNMP agents.
-#
-# The default snmp_incoming_address is to listen on all
-# available network interfaces.
-#Default:
-# Accept SNMP packets from all machine interfaces.
-
-# TAG: snmp_outgoing_address
-# Just like 'udp_outgoing_address', but for the SNMP port.
-#
-# snmp_outgoing_address is used for SNMP packets returned to SNMP
-# agents.
-#
-# If snmp_outgoing_address is not set it will use the same socket
-# as snmp_incoming_address. Only change this if you want to have
-# SNMP replies sent using another address than where this Squid
-# listens for SNMP queries.
-#
-# NOTE, snmp_incoming_address and snmp_outgoing_address can not have
-# the same value since they both use the same port.
-#Default:
-# Use snmp_incoming_address or an address selected by the operating system.
-
-# ICP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: icp_port
-# The port number where Squid sends and receives ICP queries to
-# and from neighbor caches. The standard UDP port for ICP is 3130.
-#
-# Example:
-# icp_port 3130
-#Default:
-# ICP disabled.
-
-# TAG: htcp_port
-# The port number where Squid sends and receives HTCP queries to
-# and from neighbor caches. To enable HTCP, set this to 4827.
-#
-# Example:
-# htcp_port 4827
-#Default:
-# HTCP disabled.
-
-# TAG: log_icp_queries on|off
-# If set, ICP queries are logged to access.log. You may wish
-# to disable this if your ICP load is VERY high to speed things
-# up or to simplify log analysis.
-#Default:
-# log_icp_queries on
-
-# TAG: udp_incoming_address
-# udp_incoming_address is used for UDP packets received from other
-# caches.
-#
-# The default behavior is to not bind to any specific address.
-#
-# Only change this if you want to have all UDP queries received on
-# a specific interface/address.
-#
-# NOTE: udp_incoming_address is used by the ICP, HTCP, and DNS
-# modules. Altering it will affect all of them in the same manner.
-#
-# see also: udp_outgoing_address
-#
-# NOTE, udp_incoming_address and udp_outgoing_address can not
-# have the same value since they both use the same port.
-#Default:
-# Accept packets from all machine interfaces.
-
-# TAG: udp_outgoing_address
-# udp_outgoing_address is used for UDP packets sent out to other
-# caches.
-#
-# The default behavior is to not bind to any specific address.
-#
-# Instead it will use the same socket as udp_incoming_address.
-# Only change this if you want to have UDP queries sent using another
-# address than where this Squid listens for UDP queries from other
-# caches.
-#
-# NOTE: udp_outgoing_address is used by the ICP, HTCP, and DNS
-# modules. Altering it will affect all of them in the same manner.
-#
-# see also: udp_incoming_address
-#
-# NOTE, udp_incoming_address and udp_outgoing_address can not
-# have the same value since they both use the same port.
-#Default:
-# Use udp_incoming_address or an address selected by the operating system.
-
-# TAG: icp_hit_stale on|off
-# If you want to return ICP_HIT for stale cache objects, set this
-# option to 'on'. If you have sibling relationships with caches
-# in other administrative domains, this should be 'off'. If you only
-# have sibling relationships with caches under your control,
-# it is probably okay to set this to 'on'.
-# If set to 'on', your siblings should use the option "allow-miss"
-# on their cache_peer lines for connecting to you.
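-#
-# Illustrative example (the hostname is hypothetical); the cache_peer
-# line, pointing at this cache, belongs in each sibling's squid.conf:
-# icp_hit_stale on
-# cache_peer this-cache.example.com sibling 3128 3130 allow-miss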
-#Default:
-# icp_hit_stale off
-
-# TAG: minimum_direct_hops
-# If using the ICMP pinging stuff, do direct fetches for sites
-# which are no more than this many hops away.
-#Default:
-# minimum_direct_hops 4
-
-# TAG: minimum_direct_rtt (msec)
-# If using the ICMP pinging stuff, do direct fetches for sites
-# which are no more than this many rtt milliseconds away.
-#Default:
-# minimum_direct_rtt 400
-
-# TAG: netdb_low
-# The low water mark for the ICMP measurement database.
-#
-# Note: high watermark controlled by netdb_high directive.
-#
-# These watermarks are counts, not percents. The defaults are
-# (low) 900 and (high) 1000. When the high water mark is
-# reached, database entries will be deleted until the low
-# mark is reached.
-#Default:
-# netdb_low 900
-
-# TAG: netdb_high
-# The high water mark for the ICMP measurement database.
-#
-# Note: low watermark controlled by netdb_low directive.
-#
-# These watermarks are counts, not percents. The defaults are
-# (low) 900 and (high) 1000. When the high water mark is
-# reached, database entries will be deleted until the low
-# mark is reached.
-#Default:
-# netdb_high 1000
-
-# TAG: netdb_ping_period
-# The minimum period for measuring a site. There will be at
-# least this much delay between successive pings to the same
-# network. The default is five minutes.
-#Default:
-# netdb_ping_period 5 minutes
-
-# TAG: query_icmp on|off
-# If you want to ask your peers to include ICMP data in their ICP
-# replies, enable this option.
-#
-# If your peer has configured Squid (during compilation) with
-# '--enable-icmp' that peer will send ICMP pings to origin server
-# sites of the URLs it receives. If you enable this option the
-# ICP replies from that peer will include the ICMP data (if available).
-# Then, when choosing a parent cache, Squid will choose the parent with
-# the minimal RTT to the origin server. When this happens, the
-# hierarchy field of the access.log will be
-# "CLOSEST_PARENT_MISS". This option is off by default.
-#Default:
-# query_icmp off
-
-# TAG: test_reachability on|off
-# When this is 'on', ICP MISS replies will be ICP_MISS_NOFETCH
-# instead of ICP_MISS if the target host is NOT in the ICMP
-# database, or has a zero RTT.
-#Default:
-# test_reachability off
-
-# TAG: icp_query_timeout (msec)
-# Normally Squid will automatically determine an optimal ICP
-# query timeout value based on the round-trip-time of recent ICP
-# queries. If you want to override the value determined by
-# Squid, set this 'icp_query_timeout' to a non-zero value. This
-# value is specified in MILLISECONDS, so, to use a 2-second
-# timeout (the old default), you would write:
-#
-# icp_query_timeout 2000
-#Default:
-# Dynamic detection.
-
-# TAG: maximum_icp_query_timeout (msec)
-# Normally the ICP query timeout is determined dynamically. But
-# sometimes it can lead to very large values (say 5 seconds).
-# Use this option to put an upper limit on the dynamic timeout
-# value. Do NOT use this option to always use a fixed (instead
-# of a dynamic) timeout value. To set a fixed timeout see the
-# 'icp_query_timeout' directive.
-#Default:
-# maximum_icp_query_timeout 2000
-
-# TAG: minimum_icp_query_timeout (msec)
-# Normally the ICP query timeout is determined dynamically. But
-# sometimes it can lead to very small timeouts, even lower than
-# the normal latency variance on your link due to traffic.
-# Use this option to put a lower limit on the dynamic timeout
-# value. Do NOT use this option to always use a fixed (instead
-# of a dynamic) timeout value. To set a fixed timeout see the
-# 'icp_query_timeout' directive.
-#Default:
-# minimum_icp_query_timeout 5
-
-# TAG: background_ping_rate time-units
-# Controls how often the ICP pings are sent to siblings that
-# have background-ping set.
-#Default:
-# background_ping_rate 10 seconds
-
-# MULTICAST ICP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: mcast_groups
-# This tag specifies a list of multicast groups which your server
-# should join to receive multicasted ICP queries.
-#
-# NOTE! Be very careful what you put here! Be sure you
-# understand the difference between an ICP _query_ and an ICP
-# _reply_. This option is to be set only if you want to RECEIVE
-# multicast queries. Do NOT set this option to SEND multicast
-# ICP (use cache_peer for that). ICP replies are always sent via
-# unicast, so this option does not affect whether or not you will
-# receive replies from multicast group members.
-#
-# You must be very careful to NOT use a multicast address which
-# is already in use by another group of caches.
-#
-# If you are unsure about multicast, please read the Multicast
-# chapter in the Squid FAQ (http://www.squid-cache.org/FAQ/).
-#
-# Usage: mcast_groups 239.128.16.128 224.0.1.20
-#
-# By default, Squid doesn't listen on any multicast groups.
-#Default:
-# none
-
-# TAG: mcast_miss_addr
-# Note: This option is only available if Squid is rebuilt with the
-# -DMULTICAST_MISS_STREAM define
-#
-# If you enable this option, every "cache miss" URL will
-# be sent out on the specified multicast address.
-#
-# Do not enable this option unless you are absolutely
-# certain you understand what you are doing.
-#Default:
-# disabled.
-
-# TAG: mcast_miss_ttl
-# Note: This option is only available if Squid is rebuilt with the
-# -DMULTICAST_MISS_STREAM define
-#
-# This is the time-to-live value for packets multicasted
-# when multicasting off cache miss URLs is enabled. By
-# default this is set to 'site scope', i.e. 16.
-#Default:
-# mcast_miss_ttl 16
-
-# TAG: mcast_miss_port
-# Note: This option is only available if Squid is rebuilt with the
-# -DMULTICAST_MISS_STREAM define
-#
-# This is the port number to be used in conjunction with
-# 'mcast_miss_addr'.
-#Default:
-# mcast_miss_port 3135
-
-# TAG: mcast_miss_encode_key
-# Note: This option is only available if Squid is rebuilt with the
-# -DMULTICAST_MISS_STREAM define
-#
-# The URLs that are sent in the multicast miss stream are
-# encrypted. This is the encryption key.
-#Default:
-# mcast_miss_encode_key XXXXXXXXXXXXXXXX
-
-# TAG: mcast_icp_query_timeout (msec)
-# For multicast peers, Squid regularly sends out ICP "probes" to
-# count how many other peers are listening on the given multicast
-# address. This value specifies how long Squid should wait to
-# count all the replies. The default is 2000 msec, or 2
-# seconds.
-#Default:
-# mcast_icp_query_timeout 2000
-
-# INTERNAL ICON OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: icon_directory
-# Where the icons are stored. These are normally kept in
-# /usr/share/squid3/icons
-#Default:
-# icon_directory /usr/share/squid3/icons
-
-# TAG: global_internal_static
-# This directive controls whether Squid should intercept all requests for
-# /squid-internal-static/ no matter which host the URL is requesting
-# (default on setting), or if nothing special should be done for
-# such URLs (off setting). The purpose of this directive is to make
-# icons etc work better in complex cache hierarchies where it may
-# not always be possible for all corners in the cache mesh to reach
-# the server generating a directory listing.
-#Default:
-# global_internal_static on
-
-# TAG: short_icon_urls
-# If this is enabled Squid will use short URLs for icons.
-# If disabled it will revert to the old behavior of including
-# its own name and port in the URL.
-#
-# If you run a complex cache hierarchy with a mix of Squid and
-# other proxies you may need to disable this directive.
-#Default:
-# short_icon_urls on
-
-# ERROR PAGE OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: error_directory
-# If you wish to create your own versions of the default
-# error files to customize them to suit your company, copy
-# the error/template files to another directory and point
-# this tag at them.
-#
-# WARNING: This option will disable multi-language support
-# on error pages if used.
-#
-# The squid developers are interested in making squid available in
-# a wide variety of languages. If you are making translations for a
-# language that Squid does not currently provide please consider
-# contributing your translation back to the project.
-# http://wiki.squid-cache.org/Translations
-#
-# The squid developers working on translations are happy to supply drop-in
-# translated error files in exchange for any new language contributions.
-#Default:
-# Send error pages in the client's preferred language
-
-# TAG: error_default_language
-# Set the default language which squid will send error pages in
-# if no existing translation matches the client's language
-# preferences.
-#
-# If unset (default) generic English will be used.
-#
-# The squid developers are interested in making squid available in
-# a wide variety of languages. If you are interested in making
-# translations for any language see the squid wiki for details.
-# http://wiki.squid-cache.org/Translations
-#Default:
-# Generate English language pages.
-
-# TAG: error_log_languages
-# Log to cache.log what languages users are attempting to
-# auto-negotiate for translations.
-#
-# Successful negotiations are not logged. Only failures
-# have meaning to indicate that Squid may need an upgrade
-# of its error page translations.
-#Default:
-# error_log_languages on
-
-# TAG: err_page_stylesheet
-# CSS Stylesheet to pattern the display of Squid default error pages.
-#
-# For information on CSS see http://www.w3.org/Style/CSS/
-#Default:
-# err_page_stylesheet /etc/squid3/errorpage.css
-
-# TAG: err_html_text
-# HTML text to include in error messages. Make this a "mailto"
-# URL to your admin address, or maybe just a link to your
-# organization's Web page.
-#
-# To include this in your error messages, you must rewrite
-# the error template files (found in the "errors" directory).
-# Wherever you want the 'err_html_text' line to appear,
-# insert a %L tag in the error template file.
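-#
-# Illustrative example (the mailto address is hypothetical):
-# err_html_text <A HREF="mailto:webmaster@example.com">Contact the cache administrator</A>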
-#Default:
-# none
-
-# TAG: email_err_data on|off
-# If enabled, information about the error that occurred will be
-# included in the mailto links of the ERR pages (if %W is set)
-# so that the email body contains the data.
-# Syntax is <A HREF="mailto:%w%W">%w</A>
-#Default:
-# email_err_data on
-
-# TAG: deny_info
-# Usage: deny_info err_page_name acl
-# or deny_info http://... acl
-# or deny_info TCP_RESET acl
-#
-# This can be used to return an ERR_ page for requests which
-# do not pass the 'http_access' rules. Squid remembers the last
-# acl it evaluated in http_access, and if a 'deny_info' line exists
-# for that ACL Squid returns a corresponding error page.
-#
-# The acl is typically the last acl on the http_access deny line which
-# denied access. The exceptions to this rule are:
-# - When Squid needs to request authentication credentials. It's then
-# the first authentication related acl encountered
-# - When none of the http_access lines matches. It's then the last
-# acl processed on the last http_access line.
-# - When the decision to deny access was made by an adaptation service,
-# the acl name is the corresponding eCAP or ICAP service_name.
-#
-# NP: If providing your own custom error pages with error_directory
-# you may also specify them by your custom file name:
-# Example: deny_info ERR_CUSTOM_ACCESS_DENIED bad_guys
-#
-# By default Squid will send "403 Forbidden". A different 4xx or 5xx
-# may be specified by prefixing the file name with the code and a colon.
-# e.g. 404:ERR_CUSTOM_ACCESS_DENIED
-#
-# Alternatively you can tell Squid to reset the TCP connection
-# by specifying TCP_RESET.
-#
-# Or you can specify an error URL or URL pattern. The browsers will
-# get redirected to the specified URL after formatting tags have
-# been replaced. Redirect will be done with 302 or 307 according to
-# HTTP/1.1 specs. A different 3xx code may be specified by prefixing
-# the URL. e.g. 303:http://example.com/
-#
-# URL FORMAT TAGS:
-# %a - username (if available. Password NOT included)
-# %B - FTP path URL
-# %e - Error number
-# %E - Error description
-# %h - Squid hostname
-# %H - Request domain name
-# %i - Client IP Address
-# %M - Request Method
-# %o - Message result from external ACL helper
-# %p - Request Port number
-# %P - Request Protocol name
-# %R - Request URL path
-# %T - Timestamp in RFC 1123 format
-# %U - Full canonical URL from client
-# (HTTPS URLs terminate with *)
-# %u - Full canonical URL from client
-# %w - Admin email from squid.conf
-# %x - Error name
-# %% - Literal percent (%) code
-#
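-# Illustrative example (the acl name and domain are hypothetical):
-# acl blocked_sites dstdomain .blocked.example.com
-# http_access deny blocked_sites
-# deny_info ERR_ACCESS_DENIED blocked_sites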
-#Default:
-# none
-
-# OPTIONS INFLUENCING REQUEST FORWARDING
-# -----------------------------------------------------------------------------
-
-# TAG: nonhierarchical_direct
-# By default, Squid will send any non-hierarchical requests
-# (matching hierarchy_stoplist or a non-cacheable request type)
-# directly to origin servers.
-#
-# When this is set to "off", Squid will prefer to send these
-# requests to parents.
-#
-# Note that in most configurations, by turning this off you will only
-# add latency to these requests without any improvement in global hit
-# ratio.
-#
-# This option only sets a preference. If the parent is unavailable a
-# direct connection to the origin server may still be attempted. To
-# completely prevent direct connections use never_direct.
-#Default:
-# nonhierarchical_direct on
-
-# TAG: prefer_direct
-# Normally Squid tries to use parents for most requests. If for some
-# reason you want it to first try going direct and only use a parent
-# if going direct fails, set this to on.
-#
-# By combining nonhierarchical_direct off and prefer_direct on you
-# can set up Squid to use a parent as a backup path if going direct
-# fails.
-#
-# Note: If you want Squid to use parents for all requests see
-# the never_direct directive. prefer_direct only modifies how Squid
-# acts on cacheable requests.
-#Default:
-# prefer_direct off
-
-# TAG: always_direct
-# Usage: always_direct allow|deny [!]aclname ...
-#
-# Here you can use ACL elements to specify requests which should
-# ALWAYS be forwarded by Squid to the origin servers without using
-# any peers. For example, to always directly forward requests for
-# local servers ignoring any parents or siblings you may have use
-# something like:
-#
-# acl local-servers dstdomain my.domain.net
-# always_direct allow local-servers
-#
-# To always forward FTP requests directly, use
-#
-# acl FTP proto FTP
-# always_direct allow FTP
-#
-# NOTE: There is a similar, but opposite option named
-# 'never_direct'. You need to be aware that "always_direct deny
-# foo" is NOT the same thing as "never_direct allow foo". You
-# may need to use a deny rule to exclude a more-specific case of
-# some other rule. Example:
-#
-# acl local-external dstdomain external.foo.net
-# acl local-servers dstdomain .foo.net
-# always_direct deny local-external
-# always_direct allow local-servers
-#
-# NOTE: If your goal is to make the client forward the request
-# directly to the origin server bypassing Squid then this needs
-# to be done in the client configuration. Squid configuration
-# can only tell Squid how Squid should fetch the object.
-#
-# NOTE: This directive is not related to caching. The replies
-# are cached as usual even if you use always_direct. To not cache
-# the replies see the 'cache' directive.
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Prevent any cache_peer being used for this request.
-
-# TAG: never_direct
-# Usage: never_direct allow|deny [!]aclname ...
-#
-# never_direct is the opposite of always_direct. Please read
-# the description for always_direct if you have not already.
-#
-# With 'never_direct' you can use ACL elements to specify
-# requests which should NEVER be forwarded directly to origin
-# servers. For example, to force the use of a proxy for all
-# requests, except those in your local domain use something like:
-#
-# acl local-servers dstdomain .foo.net
-# never_direct deny local-servers
-# never_direct allow all
-#
-# or if Squid is inside a firewall and there are local intranet
-# servers inside the firewall use something like:
-#
-# acl local-intranet dstdomain .foo.net
-# acl local-external dstdomain external.foo.net
-# always_direct deny local-external
-# always_direct allow local-intranet
-# never_direct allow all
-#
-# This clause supports both fast and slow acl types.
-# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
-#Default:
-# Allow DNS results to be used for this request.
-
-# ADVANCED NETWORKING OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: incoming_udp_average
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# incoming_udp_average 6
-
-# TAG: incoming_tcp_average
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# incoming_tcp_average 4
-
-# TAG: incoming_dns_average
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# incoming_dns_average 4
-
-# TAG: min_udp_poll_cnt
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# min_udp_poll_cnt 8
-
-# TAG: min_dns_poll_cnt
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# min_dns_poll_cnt 8
-
-# TAG: min_tcp_poll_cnt
-# Heavy voodoo here. I can't even believe you are reading this.
-# Are you crazy? Don't even think about adjusting these unless
-# you understand the algorithms in comm_select.c first!
-#Default:
-# min_tcp_poll_cnt 8
-
-# TAG: accept_filter
-# FreeBSD:
-#
-# The name of an accept(2) filter to install on Squid's
-# listen socket(s). This feature is perhaps specific to
-# FreeBSD and requires support in the kernel.
-#
-# The 'httpready' filter delays delivering new connections
-# to Squid until a full HTTP request has been received.
-# See the accf_http(9) man page for details.
-#
-# The 'dataready' filter delays delivering new connections
-# to Squid until there is some data to process.
-# See the accf_dataready(9) man page for details.
-#
-# Linux:
-#
-# The 'data' filter delays delivering of new connections
-# to Squid until there is some data to process by TCP_DEFER_ACCEPT.
-# You may optionally specify a number of seconds to wait by
-# 'data=N' where N is the number of seconds. Defaults to 30
-# if not specified. See the tcp(7) man page for details.
-#EXAMPLE:
-## FreeBSD
-#accept_filter httpready
-## Linux
-#accept_filter data
-#Default:
-# none
-
-# TAG: client_ip_max_connections
-# Set an absolute limit on the number of connections a single
-# client IP can use. Any more than this and Squid will begin to drop
-# new connections from the client until it closes some links.
-#
-# Note that this is a global limit. It affects all HTTP, HTCP, Gopher and FTP
-# connections from the client. For finer control use the ACL access controls.
-#
-# Requires client_db to be enabled (the default).
-#
-# WARNING: This may noticeably slow down traffic received via external proxies
-# or NAT devices and cause them to rebound error messages back to their clients.
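-#
-# Illustrative example (limit chosen arbitrarily):
-# client_ip_max_connections 50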
-#Default:
-# No limit.
-
-# TAG: tcp_recv_bufsize (bytes)
-# Size of receive buffer to set for TCP sockets. Probably just
-# as easy to change your kernel's default.
-# Omit from squid.conf to use the default buffer size.
-#Default:
-# Use operating system TCP defaults.
-
-# ICAP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: icap_enable on|off
-# If you want to enable the ICAP module support, set this to on.
-#Default:
-# icap_enable off
-
-# TAG: icap_connect_timeout
-# This parameter specifies how long to wait for the TCP connect to
-# the requested ICAP server to complete before giving up and either
-# terminating the HTTP transaction or bypassing the failure.
-#
-# The default for optional services is peer_connect_timeout.
-# The default for essential services is connect_timeout.
-# If this option is explicitly set, its value applies to all services.
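-#
-# Illustrative example (timeout value chosen arbitrarily):
-# icap_connect_timeout 10 seconds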
-#Default:
-# none
-
-# TAG: icap_io_timeout time-units
-# This parameter specifies how long to wait for an I/O activity on
-# an established, active ICAP connection before giving up and
-# either terminating the HTTP transaction or bypassing the
-# failure.
-#Default:
-# Use read_timeout.
-
-# TAG: icap_service_failure_limit limit [in memory-depth time-units]
-# The limit specifies the number of failures that Squid tolerates
-# when establishing a new TCP connection with an ICAP service. If
-# the number of failures exceeds the limit, the ICAP service is
-# not used for new ICAP requests until it is time to refresh its
-# OPTIONS.
-#
-# A negative value disables the limit. Without the limit, an ICAP
-# service will not be considered down due to connectivity failures
-# between ICAP OPTIONS requests.
-#
-# Squid forgets ICAP service failures older than the specified
-# value of memory-depth. The memory fading algorithm
-# is approximate because Squid does not remember individual
-# errors but groups them instead, splitting the option
-# value into ten time slots of equal length.
-#
-# When memory-depth is 0 (the default), this option has no
-# effect on service failure expiration.
-#
-# Squid always forgets failures when updating service settings
-# using an ICAP OPTIONS transaction, regardless of this option
-# setting.
-#
-# For example,
-# # suspend service usage after 10 failures in 5 seconds:
-# icap_service_failure_limit 10 in 5 seconds
-#Default:
-# icap_service_failure_limit 10
-
-# TAG: icap_service_revival_delay
-# The delay specifies the number of seconds to wait after an ICAP
-# OPTIONS request failure before requesting the options again. The
-# failed ICAP service is considered "down" until fresh OPTIONS are
-# fetched.
-#
-# The actual delay cannot be smaller than the hardcoded minimum
-# delay of 30 seconds.
-#Default:
-# icap_service_revival_delay 180
-
-# TAG: icap_preview_enable on|off
-# The ICAP Preview feature allows the ICAP server to handle the
-# HTTP message by looking only at the beginning of the message body
-# or even without receiving the body at all. In some environments,
-# previews greatly speed up ICAP processing.
-#
-# During an ICAP OPTIONS transaction, the server may tell Squid what
-# HTTP messages should be previewed and how big the preview should be.
-# Squid will not use Preview if the server did not request one.
-#
-# To disable ICAP Preview for all ICAP services, regardless of
-# individual ICAP server OPTIONS responses, set this option to "off".
-#Example:
-#icap_preview_enable off
-#Default:
-# icap_preview_enable on
-
-# TAG: icap_preview_size
-# The default size of preview data to be sent to the ICAP server.
-# This value might be overwritten on a per server basis by OPTIONS requests.
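-#
-# Illustrative example (size chosen arbitrarily):
-# icap_preview_size 1024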
-#Default:
-# No preview sent.
-
-# TAG: icap_206_enable on|off
-# 206 (Partial Content) responses are an ICAP extension that allows
-# ICAP agents to optionally combine adapted and original HTTP message
-# content. The decision to combine is postponed until the end of the
-# ICAP response. Squid supports Partial Content extension by default.
-#
-# Activation of the Partial Content extension is negotiated with each
-# ICAP service during OPTIONS exchange. Most ICAP servers should handle
-# negotiation correctly even if they do not support the extension, but
-# some might fail. To disable Partial Content support for all ICAP
-# services and to avoid any negotiation, set this option to "off".
-#
-# Example:
-# icap_206_enable off
-#Default:
-# icap_206_enable on
-
-# TAG: icap_default_options_ttl
-# The default TTL value for ICAP OPTIONS responses that don't have
-# an Options-TTL header.
-#Default:
-# icap_default_options_ttl 60
-
-# TAG: icap_persistent_connections on|off
-# Whether or not Squid should use persistent connections to
-# an ICAP server.
-#Default:
-# icap_persistent_connections on
-
-# TAG: adaptation_send_client_ip on|off
-# If enabled, Squid shares HTTP client IP information with adaptation
-# services. For ICAP, Squid adds the X-Client-IP header to ICAP requests.
-# For eCAP, Squid sets the libecap::metaClientIp transaction option.
-#
-# See also: adaptation_uses_indirect_client
-#Default:
-# adaptation_send_client_ip off
-
-# TAG: adaptation_send_username on|off
-# This sends authenticated HTTP client username (if available) to
-# the adaptation service.
-#
-# For ICAP, the username value is encoded based on the
-# icap_client_username_encode option and is sent using the header
-# specified by the icap_client_username_header option.
-#Default:
-# adaptation_send_username off
-
-# TAG: icap_client_username_header
-# ICAP request header name to use for adaptation_send_username.
-#Default:
-# icap_client_username_header X-Client-Username
-
-# TAG: icap_client_username_encode on|off
-# Whether to base64 encode the authenticated client username.
-#Default:
-# icap_client_username_encode off
-
-# TAG: icap_service
-# Defines a single ICAP service using the following format:
-#
-# icap_service id vectoring_point uri [option ...]
-#
-# id: ID
-# an opaque identifier or name which is used to direct traffic to
-# this specific service. Must be unique among all adaptation
-# services in squid.conf.
-#
-# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
-# This specifies at which point of transaction processing the
-# ICAP service should be activated. *_postcache vectoring points
-# are not yet supported.
-#
-# uri: icap://servername:port/servicepath
-# ICAP server and service location.
-#
-# ICAP does not allow a single service to handle both REQMOD and RESPMOD
-# transactions. Squid does not enforce that requirement. You can specify
-# services with the same service_url and different vectoring_points. You
-# can even specify multiple identical services as long as their
-# service_names differ.
-#
-# To activate a service, use the adaptation_access directive. To group
-# services, use adaptation_service_chain and adaptation_service_set.
-#
-# Service options are separated by white space. ICAP services support
-# the following name=value options:
-#
-# bypass=on|off|1|0
-# If set to 'on' or '1', the ICAP service is treated as
-# optional. If the service cannot be reached or malfunctions,
-# Squid will try to ignore any errors and process the message as
-# if the service was not enabled. Not all ICAP errors can be
-# bypassed. If set to 0, the ICAP service is treated as
-# essential and all ICAP errors will result in an error page
-# returned to the HTTP client.
-#
-# Bypass is off by default: services are treated as essential.
-#
-# routing=on|off|1|0
-# If set to 'on' or '1', the ICAP service is allowed to
-# dynamically change the current message adaptation plan by
-# returning a chain of services to be used next. The services
-# are specified using the X-Next-Services ICAP response header
-# value, formatted as a comma-separated list of service names.
-# Each named service should be configured in squid.conf. Other
-# services are ignored. An empty X-Next-Services value results
-# in an empty plan which ends the current adaptation.
-#
-# A dynamic adaptation plan may cross or cover multiple supported
-# vectoring points in their natural processing order.
-#
-# Routing is not allowed by default: the ICAP X-Next-Services
-# response header is ignored.
-#
-# ipv6=on|off
-# Only has effect on split-stack systems. The default on those systems
-# is to use IPv4-only connections. When set to 'on' this option will
-# make Squid use IPv6-only connections to contact this ICAP service.
-#
-# on-overload=block|bypass|wait|force
-# If the service Max-Connections limit has been reached, do
-# one of the following for each new ICAP transaction:
-# * block: send an HTTP error response to the client
-# * bypass: ignore the "over-connected" ICAP service
-# * wait: wait (in a FIFO queue) for an ICAP connection slot
-# * force: proceed, ignoring the Max-Connections limit
-#
-# In SMP mode with N workers, each worker assumes the service
-# connection limit is Max-Connections/N, even though not all
-# workers may use a given service.
-#
-# The default value is "bypass" if service is bypassable,
-# otherwise it is set to "wait".
-#
-#
-# max-conn=number
-# Use the given number as the Max-Connections limit, regardless
-# of the Max-Connections value given by the service, if any.
-#
-# Older icap_service format without optional named parameters is
-# deprecated but supported for backward compatibility.
-#
-#Example:
-#icap_service svcBlocker reqmod_precache icap://icap1.mydomain.net:1344/reqmod bypass=0
-#icap_service svcLogger reqmod_precache icap://icap2.mydomain.net:1344/respmod routing=on
-#Default:
-# none
-
-# TAG: icap_class
-# This deprecated option was documented to define an ICAP service
-# chain, even though it actually defined a set of similar, redundant
-# services, and the chains were not supported.
-#
-# To define a set of redundant services, please use the
-# adaptation_service_set directive. For service chains, use
-# adaptation_service_chain.
-#Default:
-# none
-
-# TAG: icap_access
-# This option is deprecated. Please use adaptation_access, which
-# has the same ICAP functionality, but comes with better
-# documentation, and eCAP support.
-#Default:
-# none
-
-# eCAP OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: ecap_enable on|off
-# Controls whether eCAP support is enabled.
-#Default:
-# ecap_enable off
-
-# TAG: ecap_service
-# Defines a single eCAP service
-#
-# ecap_service id vectoring_point uri [option ...]
-#
-# id: ID
-# an opaque identifier or name which is used to direct traffic to
-# this specific service. Must be unique among all adaptation
-# services in squid.conf.
-#
-# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
-# This specifies at which point of transaction processing the
-# eCAP service should be activated. *_postcache vectoring points
-# are not yet supported.
-#
-# uri: ecap://vendor/service_name?custom&cgi=style&parameters=optional
-# Squid uses the eCAP service URI to match this configuration
-# line with one of the dynamically loaded services. Each loaded
-# eCAP service must have a unique URI. Obtain the right URI from
-# the service provider.
-#
-# To activate a service, use the adaptation_access directive. To group
-# services, use adaptation_service_chain and adaptation_service_set.
-#
-# Service options are separated by white space. eCAP services support
-# the following name=value options:
-#
-# bypass=on|off|1|0
-# If set to 'on' or '1', the eCAP service is treated as optional.
-# If the service cannot be reached or malfunctions, Squid will try
-# to ignore any errors and process the message as if the service
-# was not enabled. Not all eCAP errors can be bypassed.
-# If set to 'off' or '0', the eCAP service is treated as essential
-# and all eCAP errors will result in an error page returned to the
-# HTTP client.
-#
-# Bypass is off by default: services are treated as essential.
-#
-# routing=on|off|1|0
-# If set to 'on' or '1', the eCAP service is allowed to
-# dynamically change the current message adaptation plan by
-# returning a chain of services to be used next.
-#
-# A dynamic adaptation plan may cross or cover multiple supported
-# vectoring points in their natural processing order.
-#
-# Routing is not allowed by default.
-#
-# Older ecap_service format without optional named parameters is
-# deprecated but supported for backward compatibility.
-#
-#
-#Example:
-#ecap_service s1 reqmod_precache ecap://filters.R.us/leakDetector?on_error=block bypass=off
-#ecap_service s2 respmod_precache ecap://filters.R.us/virusFilter config=/etc/vf.cfg bypass=on
-#Default:
-# none
-
-# TAG: loadable_modules
-# Instructs Squid to load the specified dynamic module(s) or activate
-# preloaded module(s).
-#Example:
-#loadable_modules /usr/lib/MinimalAdapter.so
-#Default:
-# none
-
-# MESSAGE ADAPTATION OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: adaptation_service_set
-#
-# Configures an ordered set of similar, redundant services. This is
-# useful when hot standby or backup adaptation servers are available.
-#
-# adaptation_service_set set_name service_name1 service_name2 ...
-#
-# The named services are used in the set declaration order. The first
-# applicable adaptation service from the set is used first. The next
-# applicable service is tried if and only if the transaction with the
-# previous service fails and the message waiting to be adapted is still
-# intact.
-#
-# When adaptation starts, broken services are ignored as if they were
-# not a part of the set. A broken service is a down optional service.
-#
-# The services in a set must be attached to the same vectoring point
-# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
-#
-# If all services in a set are optional then adaptation failures are
-# bypassable. If all services in the set are essential, then a
-# transaction failure with one service may still be retried using
-# another service from the set, but when all services fail, the master
-# transaction fails as well.
-#
-# A set may contain a mix of optional and essential services, but that
-# is likely to lead to surprising results because broken services become
-# ignored (see above), making previously bypassable failures fatal.
-# Technically, it is the bypassability of the last failed service that
-# matters.
-#
-# See also: adaptation_access adaptation_service_chain
-#
-#Example:
-#adaptation_service_set svcBlocker urlFilterPrimary urlFilterBackup
-#adaptation_service_set svcLogger loggerLocal loggerRemote
-#Default:
-# none
-
-# TAG: adaptation_service_chain
-#
-# Configures a list of complementary services that will be applied
-# one-by-one, forming an adaptation chain or pipeline. This is useful
-# when Squid must perform different adaptations on the same message.
-#
-# adaptation_service_chain chain_name service_name1 svc_name2 ...
-#
-# The named services are used in the chain declaration order. The first
-# applicable adaptation service from the chain is used first. The next
-# applicable service is applied to the successful adaptation results of
-# the previous service in the chain.
-#
-# When adaptation starts, broken services are ignored as if they were
-# not a part of the chain. A broken service is a down optional service.
-#
-# Request satisfaction terminates the adaptation chain because Squid
-# does not currently allow declaration of RESPMOD services at the
-# "reqmod_precache" vectoring point (see icap_service or ecap_service).
-#
-# The services in a chain must be attached to the same vectoring point
-# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
-#
-# A chain may contain a mix of optional and essential services. If an
-# essential adaptation fails (or the failure cannot be bypassed for
-# other reasons), the master transaction fails. Otherwise, the failure
-# is bypassed as if the failed adaptation service was not in the chain.
-#
-# See also: adaptation_access adaptation_service_set
-#
-#Example:
-#adaptation_service_chain svcRequest requestLogger urlFilter leakDetector
-#Default:
-# none
-
-# TAG: adaptation_access
-# Sends an HTTP transaction to an ICAP or eCAP adaptation service.
-#
-# adaptation_access service_name allow|deny [!]aclname...
-# adaptation_access set_name allow|deny [!]aclname...
-#
-# At each supported vectoring point, the adaptation_access
-# statements are processed in the order they appear in this
-# configuration file. Statements pointing to the following services
-# are ignored (i.e., skipped without checking their ACL):
-#
-# - services serving different vectoring points
-# - "broken-but-bypassable" services
-# - "up" services configured to ignore such transactions
-# (e.g., based on the ICAP Transfer-Ignore header).
-#
-# When a set_name is used, all services in the set are checked
-# using the same rules, to find the first applicable one. See
-# adaptation_service_set for details.
-#
-# If an access list is checked and there is a match, the
-# processing stops: For an "allow" rule, the corresponding
-# adaptation service is used for the transaction. For a "deny"
-# rule, no adaptation service is activated.
-#
-# It is currently not possible to apply more than one adaptation
-# service at the same vectoring point to the same HTTP transaction.
-#
-# See also: icap_service and ecap_service
-#
-#Example:
-#adaptation_access service_1 allow all
-#Default:
-# Allow, unless rules exist in squid.conf.
-
-# TAG: adaptation_service_iteration_limit
-# Limits the number of iterations allowed when applying adaptation
-# services to a message. If your longest adaptation set or chain
-# may have more than 16 services, increase the limit beyond its
-# default value of 16. If detecting infinite iteration loops sooner
-# is critical, make the iteration limit match the actual number
-# of services in your longest adaptation set or chain.
-#
-# Infinite adaptation loops are most likely with routing services.
-#
-# See also: icap_service routing=1
-#Default:
-# adaptation_service_iteration_limit 16
-
-# TAG: adaptation_masterx_shared_names
-# For each master transaction (i.e., the HTTP request and response
-# sequence, including all related ICAP and eCAP exchanges), Squid
-# maintains a table of metadata. The table entries are (name, value)
-# pairs shared among eCAP and ICAP exchanges. The table is destroyed
-# with the master transaction.
-#
-# This option specifies the table entry names that Squid must accept
-# from and forward to the adaptation transactions.
-#
-# An ICAP REQMOD or RESPMOD transaction may set an entry in the
-# shared table by returning an ICAP header field with a name
-# specified in adaptation_masterx_shared_names.
-#
-# An eCAP REQMOD or RESPMOD transaction may set an entry in the
-# shared table by implementing the libecap::visitEachOption() API
-# to provide an option with a name specified in
-# adaptation_masterx_shared_names.
-#
-# Squid will store and forward the set entry to subsequent adaptation
-# transactions within the same master transaction scope.
-#
-# Only one shared entry name is supported at this time.
-#
-#Example:
-## share authentication information among ICAP services
-#adaptation_masterx_shared_names X-Subscriber-ID
-#Default:
-# none
-
-# TAG: adaptation_meta
-# This option allows the Squid administrator to add custom ICAP request
-# headers or eCAP options to Squid ICAP requests or eCAP transactions.
-# Use it to pass custom authentication tokens and other
-# transaction-state related meta information to an ICAP/eCAP service.
-#
-# The addition of a meta header is ACL-driven:
-# adaptation_meta name value [!]aclname ...
-#
-# Processing for a given header name stops after the first ACL list match.
-# Thus, it is impossible to add two headers with the same name. If no ACL
-# lists match for a given header name, no such header is added. For
-# example:
-#
-# # do not debug transactions except for those that need debugging
-# adaptation_meta X-Debug 1 needs_debugging
-#
-# # log all transactions except for those that must remain secret
-# adaptation_meta X-Log 1 !keep_secret
-#
-# # mark transactions from users in the "G 1" group
-# adaptation_meta X-Authenticated-Groups "G 1" authed_as_G1
-#
-# The "value" parameter may be a regular squid.conf token or a "double
-# quoted string". Within the quoted string, use backslash (\) to escape
-# any character, which is currently only useful for escaping backslashes
-# and double quotes. For example,
-# "this string has one backslash (\\) and two \"quotes\""
-#Default:
-# none
-
-# TAG: icap_retry
-# This ACL determines which retriable ICAP transactions are
-# retried. Transactions that received a complete ICAP response
-# and did not have to consume or produce HTTP bodies to receive
-# that response are usually retriable.
-#
-# icap_retry allow|deny [!]aclname ...
-#
-# Squid automatically retries some ICAP I/O timeouts and errors
-# due to persistent connection race conditions.
-#
-# See also: icap_retry_limit
-#Default:
-# icap_retry deny all
-
-# TAG: icap_retry_limit
-# Limits the number of retries allowed.
-#
-# Communication errors due to persistent connection race
-# conditions are unavoidable, automatically retried, and do not
-# count against this limit.
-#
-# See also: icap_retry
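-#
-# Illustrative example: allow retries for all retriable transactions
-# and cap them at 3 attempts:
-# icap_retry allow all
-# icap_retry_limit 3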
-#Default:
-# No retries are allowed.
-
-# DNS OPTIONS
-# -----------------------------------------------------------------------------
-
-# TAG: check_hostnames
-# For security and stability reasons Squid can check
-# hostnames for Internet standard RFC compliance. If you want
-# Squid to perform these checks turn this directive on.
-#Default:
-# check_hostnames off
-
-# TAG: allow_underscore
-# Underscore characters are not strictly allowed in Internet hostnames
-# but are nevertheless used by many sites. Set this to off if you want
-# Squid to be strict about the standard.
-# This check is performed only when check_hostnames is set to on.
-#Default:
-# allow_underscore on
-
-# TAG: cache_dns_program
-# Note: This option is only available if Squid is rebuilt with the
-# --disable-internal-dns
-#
-# Specify the location of the executable for dnslookup process.
-#Default:
-# cache_dns_program /usr/lib/squid3/dnsserver
-
-# TAG: dns_children
-# Note: This option is only available if Squid is rebuilt with the
-# --disable-internal-dns
-#
-# The maximum number of processes spawned to service DNS name lookups.
-# If you limit it to too few, Squid will have to wait for them to process
-# a backlog of requests, slowing it down. If you allow too many, they
-# will use RAM and other system resources noticeably.
-# The maximum this may be safely set to is 32.
-#
-# The startup= and idle= options allow some measure of skew in your
-# tuning.
-#
-# startup=
-#
-# Sets a minimum of how many processes are to be spawned when Squid
-# starts or reconfigures. When set to zero the first request will
-# cause spawning of the first child process to handle it.
-#
-# Starting too few will cause an initial slowdown in traffic as Squid
-# attempts to simultaneously spawn enough processes to cope.
-#
-# idle=
-#
-# Sets a minimum of how many processes Squid is to try and keep available
-# at all times. When traffic begins to rise above what the existing
-# processes can handle this many more will be spawned up to the maximum
-# configured. A minimum setting of 1 is required.
-#Default:
-# dns_children 32 startup=1 idle=1
-
-# TAG: dns_retransmit_interval
-# Initial retransmit interval for DNS queries. The interval is
-# doubled each time all configured DNS servers have been tried.
-#Default:
-# dns_retransmit_interval 5 seconds
-
-# TAG: dns_timeout
-# DNS Query timeout. If no response is received to a DNS query
-# within this time all DNS servers for the queried domain
-# are assumed to be unavailable.
-#Default:
-# dns_timeout 30 seconds
-
-# TAG: dns_packet_max
-# Maximum packet size, in bytes, to advertise via EDNS.
-# Set to "none" to disable EDNS large packet support.
-#
-# For legacy reasons DNS UDP replies will default to 512 bytes which
-# is too small for many responses. EDNS provides a means for Squid to
-# negotiate receiving larger responses back immediately without having
-# to failover with repeat requests. Responses larger than this limit
-# will retain the old behaviour of failover to TCP DNS.
-#
-# Squid has no real fixed limit internally, but allowing packet sizes
-# over 1500 bytes requires network jumbogram support and is usually not
-# necessary.
-#
-# WARNING: The RFC also indicates that some older resolvers will reply
-# with failure of the whole request if the extension is added. Some
-# resolvers have already been identified which occasionally reply with
-# a mangled EDNS response, usually when many-KB jumbogram sizes are
-# advertised by Squid.
-# Squid currently treats both of these cases as an unresolvable
-# domain, even if it would be resolvable without EDNS.
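-#
-#Example (illustrative only, not part of the stock config; the value is
-# an assumption, kept below the usual 1500-byte limit mentioned above):
-# dns_packet_max 1400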
-#Default:
-# EDNS disabled
-
-# TAG: dns_defnames on|off
-# Normally the RES_DEFNAMES resolver option is disabled
-# (see res_init(3)). This prevents caches in a hierarchy
-# from interpreting single-component hostnames locally. To allow
-# Squid to handle single-component names, enable this option.
-#Default:
-# Search for single-label domain names is disabled.
-
-# TAG: dns_nameservers
-# Use this if you want to specify a list of DNS name servers
-# (IP addresses) to use instead of those given in your
-# /etc/resolv.conf file.
-#
-# On Windows platforms, if no value is specified here or in
-# the /etc/resolv.conf file, the list of DNS name servers is
-# taken from the Windows registry; both static and dynamic DHCP
-# configurations are supported.
-#
-# Example: dns_nameservers 10.0.0.1 192.172.0.4
-#Default:
-# Use operating system definitions
-
-# TAG: hosts_file
-# Location of the host-local IP name-address associations
-# database. Most Operating Systems have such a file, in different
-# default locations:
-# - Un*X & Linux: /etc/hosts
-# - Windows NT/2000: %SystemRoot%\system32\drivers\etc\hosts
-# (%SystemRoot% value install default is c:\winnt)
-# - Windows XP/2003: %SystemRoot%\system32\drivers\etc\hosts
-# (%SystemRoot% value install default is c:\windows)
-# - Windows 9x/Me: %windir%\hosts
-# (%windir% value is usually c:\windows)
-# - Cygwin: /etc/hosts
-#
-# The file contains newline-separated definitions in the form
-# "ip_address_in_dotted_form name [name ...]", where names are
-# whitespace-separated. Lines beginning with a hash (#)
-# character are comments.
-#
-# The file is checked at startup and upon configuration.
-# If set to 'none', it won't be checked.
-# If append_domain is used, that domain will be added to
-# domain-local (i.e. not containing any dot character) host
-# definitions.
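-#
-#Example of the file format described above (illustrative only; the
-# address and names are assumptions):
-# 192.168.0.10 buildhost buildhost.example.org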
-#Default:
-# hosts_file /etc/hosts
-
-# TAG: append_domain
-# Appends local domain name to hostnames without any dots in
-# them. append_domain must begin with a period.
-#
-# Be warned that there are now Internet names consisting only of a
-# top-level domain (no dots), so setting this may cause some
-# Internet sites to become unavailable.
-#
-#Example:
-# append_domain .yourdomain.com
-#Default:
-# Use operating system definitions
-
-# TAG: ignore_unknown_nameservers
-# By default Squid checks that DNS responses are received
-# from the same IP addresses they are sent to. If they
-# don't match, Squid ignores the response and writes a warning
-# message to cache.log. You can allow responses from unknown
-# nameservers by setting this option to 'off'.
-#Default:
-# ignore_unknown_nameservers on
-
-# TAG: dns_v4_first
-# With the IPv6 Internet being as fast as or faster than the IPv4
-# Internet for most networks, Squid prefers to contact websites over IPv6.
-#
-# This option reverses the order of preference to make Squid contact
-# dual-stack websites over IPv4 first. Squid will still perform both
-# IPv6 and IPv4 DNS lookups before connecting.
-#
-# WARNING:
-# This option will restrict the situations under which IPv6
-# connectivity is used (and tested), potentially hiding network
-# problems which would otherwise be detected and warned about.
-#Default:
-# dns_v4_first off
-
-# TAG: ipcache_size (number of entries)
-# Maximum number of DNS IP cache entries.
-#Default:
-# ipcache_size 1024
-
-# TAG: ipcache_low (percent)
-#Default:
-# ipcache_low 90
-
-# TAG: ipcache_high (percent)
-# The size, low-, and high-water marks for the IP cache.
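-#
-#Example (illustrative only, not part of the stock config; the values
-# are assumptions showing the three ipcache_* directives together):
-# ipcache_size 2048
-# ipcache_low 90
-# ipcache_high 95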
-#Default:
-# ipcache_high 95
-
-# TAG: fqdncache_size (number of entries)
-# Maximum number of FQDN cache entries.
-#Default:
-# fqdncache_size 1024
-
-# MISCELLANEOUS
-# -----------------------------------------------------------------------------
-
-# TAG: memory_pools on|off
-# If set, Squid will keep pools of allocated (but unused) memory
-# available for future use. If memory is at a premium on your
-# system and you believe your malloc library outperforms Squid
-# routines, disable this.
-#Default:
-# memory_pools on
-
-# TAG: memory_pools_limit (bytes)
-# Used only with memory_pools on:
-# memory_pools_limit 50 MB
-#
-# If set to a non-zero value, Squid will keep at most the specified
-# limit of allocated (but unused) memory in memory pools. All free()
-# requests that exceed this limit will be handled by your malloc
-# library. Squid does not pre-allocate any memory, just safe-keeps
-# objects that otherwise would be free()d. Thus, it is safe to set
-# memory_pools_limit to a reasonably high value even if your
-# configuration will use less memory.
-#
-# If set to none, Squid will keep all memory it can. That is, there
-# will be no limit on the total amount of memory used for safe-keeping.
-#
-# To disable memory allocation optimization, do not set
-# memory_pools_limit to 0 or none. Set memory_pools to "off" instead.
-#
-# An overhead for maintaining memory pools is not taken into account
-# when the limit is checked. This overhead is close to four bytes per
-# object kept. However, pools may actually _save_ memory because of
-# reduced memory thrashing in your malloc library.
-#Default:
-# memory_pools_limit 5 MB
-
-# TAG: forwarded_for on|off|transparent|truncate|delete
-# If set to "on", Squid will append your client's IP address
-# in the HTTP requests it forwards. By default it looks like:
-#
-# X-Forwarded-For: 192.1.2.3
-#
-# If set to "off", it will appear as
-#
-# X-Forwarded-For: unknown
-#
-# If set to "transparent", Squid will not alter the
-# X-Forwarded-For header in any way.
-#
-# If set to "delete", Squid will delete the entire
-# X-Forwarded-For header.
-#
-# If set to "truncate", Squid will remove all existing
-# X-Forwarded-For entries, and place the client IP as the sole entry.
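-#
-#Example (illustrative only, not part of the stock config; shown here
-# as one possible privacy-oriented setting):
-# forwarded_for delete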
-#Default:
-# forwarded_for on
-
-# TAG: cachemgr_passwd
-# Specify passwords for cachemgr operations.
-#
-# Usage: cachemgr_passwd password action action ...
-#
-# Some valid actions are (see cache manager menu for a full list):
-# 5min
-# 60min
-# asndb
-# authenticator
-# cbdata
-# client_list
-# comm_incoming
-# config *
-# counters
-# delay
-# digest_stats
-# dns
-# events
-# filedescriptors
-# fqdncache
-# histograms
-# http_headers
-# info
-# io
-# ipcache
-# mem
-# menu
-# netdb
-# non_peers
-# objects
-# offline_toggle *
-# pconn
-# peer_select
-# reconfigure *
-# redirector
-# refresh
-# server_list
-# shutdown *
-# store_digest
-# storedir
-# utilization
-# via_headers
-# vm_objects
-#
-# * Indicates actions which will not be performed without a
-# valid password; others can be performed even if not listed here.
-#
-# To disable an action, set the password to "disable".
-# To allow performing an action without a password, set the
-# password to "none".
-#
-# Use the keyword "all" to set the same password for all actions.
-#
-#Example:
-# cachemgr_passwd secret shutdown
-# cachemgr_passwd lesssssssecret info stats/objects
-# cachemgr_passwd disable all
-#Default:
-# No password. Actions which require password are denied.
-
-# TAG: client_db on|off
-# If you want to disable collecting per-client statistics,
-# turn off client_db here.
-#Default:
-# client_db on
-
-# TAG: refresh_all_ims on|off
-# When you enable this option, squid will always check
-# the origin server for an update when a client sends an
-# If-Modified-Since request. Many browsers use IMS
-# requests when the user requests a reload, and this
-# ensures those clients receive the latest version.
-#
-# By default (off), squid may return a Not Modified response
-# based on the age of the cached version.
-#Default:
-# refresh_all_ims off
-
-# TAG: reload_into_ims on|off
-# When you enable this option, client no-cache or ``reload''
-# requests will be changed to If-Modified-Since requests.
-# Doing this VIOLATES the HTTP standard. Enabling this
-# feature could make you liable for problems which it
-# causes.
-#
-# see also refresh_pattern for a more selective approach.
-#Default:
-# reload_into_ims off
-
-# TAG: connect_retries
-# This sets the maximum number of connection attempts made for each
-# TCP connection. The connect_retries attempts must all still
-# complete within the connection timeout period.
-#
-# The default is not to re-try if the first connection attempt fails.
-# The (not recommended) maximum is 10 tries.
-#
-# A warning message will be generated if it is set too high, and the
-# configured value will be overridden.
-#
-# Note: These retries are in addition to forward_max_tries,
-# which limits how many different addresses may be tried to find
-# a useful server.
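-#
-#Example (illustrative only, not part of the stock config; the value is
-# an assumption, well below the maximum of 10 noted above):
-# connect_retries 2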
-#Default:
-# Do not retry failed connections.
-
-# TAG: retry_on_error
-# If set to ON, Squid will automatically retry requests when
-# receiving an error response with status 403 (Forbidden),
-# 500 (Internal Server Error), 501 (Not Implemented) or 503 (Service Unavailable).
-# Status 502 and 504 (Gateway errors) are always retried.
-#
-# This is mainly useful if you are in a complex cache hierarchy to
-# work around access control errors.
-#
-# NOTE: This retry will attempt to find another working destination,
-# different from the server that just failed.
-#Default:
-# retry_on_error off
-
-# TAG: as_whois_server
-# WHOIS server to query for AS numbers. NOTE: AS numbers are
-# queried only when Squid starts up, not for every request.
-#Default:
-# as_whois_server whois.ra.net
-
-# TAG: offline_mode
-# Enable this option and Squid will never try to validate cached
-# objects.
-#Default:
-# offline_mode off
-
-# TAG: uri_whitespace
-# What to do with requests that have whitespace characters in the
-# URI. Options:
-#
-# strip: The whitespace characters are stripped out of the URL.
-# This is the behavior recommended by RFC2396 and RFC3986
-# for tolerant handling of generic URI.
-# NOTE: This is one difference between generic URI and HTTP URLs.
-#
-# deny: The request is denied. The user receives an "Invalid
-# Request" message.
-# This is the behaviour recommended by RFC2616 for safe
-# handling of HTTP request URL.
-#
-# allow: The request is allowed and the URI is not changed. The
-# whitespace characters remain in the URI. Note the
-# whitespace is passed to redirector processes if they
-# are in use.
-# Note this may be considered a violation of RFC2616
-# request parsing where whitespace is prohibited in the
-# URL field.
-#
-# encode: The request is allowed and the whitespace characters are
-# encoded according to RFC1738.
-#
-# chop: The request is allowed and the URI is chopped at the
-# first whitespace.
-#
-#
-# NOTE the current Squid implementation of encode and chop violates
-# RFC2616 by not using a 301 redirect after altering the URL.
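-#
-#Example (illustrative only, not part of the stock config; shown here
-# as the stricter RFC2616-style handling described above):
-# uri_whitespace deny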
-#Default:
-# uri_whitespace strip
-
-# TAG: chroot
-# Specifies a directory where Squid should do a chroot() while
-# initializing. This also causes Squid to fully drop root
-# privileges after initializing. This means, for example, if you
-# use an HTTP port less than 1024 and try to reconfigure, you may
-# get an error saying that Squid cannot open the port.
-#Default:
-# none
-
-# TAG: balance_on_multiple_ip
-# Modern IP resolvers in squid sort lookup results by preferred access.
-# By default squid will use these IPs in order and only rotates to
-# the next listed when the most preferred fails.
-#
-# Some load balancing servers based on round robin DNS have been
-# found not to preserve user session state across requests
-# to different IP addresses.
-#
-# With this directive enabled, Squid rotates IPs per request.
-#Default:
-# balance_on_multiple_ip off
-
-# TAG: pipeline_prefetch
-# To boost the performance of pipelined requests so that it more
-# closely matches that of a non-proxied environment, Squid can try to
-# fetch up to two requests in parallel from a pipeline.
-#
-# Defaults to off for bandwidth management and access logging
-# reasons.
-#
-# WARNING: pipelining breaks NTLM and Negotiate/Kerberos authentication.
-#Default:
-# pipeline_prefetch off
-
-# TAG: high_response_time_warning (msec)
-# If the one-minute median response time exceeds this value,
-# Squid prints a WARNING with debug level 0 to get the
-# administrator's attention. The value is in milliseconds.
-#Default:
-# disabled.
-
-# TAG: high_page_fault_warning
-# If the one-minute average page fault rate exceeds this
-# value, Squid prints a WARNING with debug level 0 to get
-# the administrator's attention. The value is in page faults
-# per second.
-#Default:
-# disabled.
-
-# TAG: high_memory_warning
-# If the memory usage (as determined by mallinfo) exceeds
-# this amount, Squid prints a WARNING with debug level 0 to get
-# the administrator's attention.
-#Default:
-# disabled.
-
-# TAG: sleep_after_fork (microseconds)
-# When this is set to a non-zero value, the main Squid process
-# sleeps the specified number of microseconds after a fork()
-# system call. This sleep may help the situation where your
-# system reports fork() failures due to lack of (virtual)
-# memory. Note, however, if you have a lot of child
-# processes, these sleep delays will add up and your
-# Squid will not service requests for some amount of time
-# until all the child processes have been started.
-# On Windows, values less than 1000 (1 millisecond) are
-# rounded to 1000.
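-#
-#Example (illustrative only, not part of the stock config; the value is
-# an assumption equal to 1 millisecond):
-# sleep_after_fork 1000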
-#Default:
-# sleep_after_fork 0
-
-# TAG: windows_ipaddrchangemonitor on|off
-# Note: This option is only available if Squid is rebuilt with the
-# MS Windows
-#
-# On Windows Squid by default will monitor IP address changes and will
-# reconfigure itself after any detected event. This is very useful for
-# proxies connected to the Internet with dial-up interfaces.
-# In some cases (a proxy server acting as a VPN gateway is one) it could
-# be desirable to disable this behaviour by setting this to 'off'.
-# Note: after changing this, Squid service must be restarted.
-#Default:
-# windows_ipaddrchangemonitor on
-
-# TAG: eui_lookup
-# Whether to lookup the EUI or MAC address of a connected client.
-#Default:
-# eui_lookup on
-
-# TAG: max_filedescriptors
-# Reduce the maximum number of filedescriptors supported below
-# the usual operating system defaults.
-#
-# Remove from squid.conf to inherit the current ulimit setting.
-#
-# Note: Changing this requires a restart of Squid. Also,
-# not all I/O types support large values (e.g. on Windows).
-#Default:
-# Use operating system limits set by ulimit.
-
-# TAG: workers
-# Number of main Squid processes or "workers" to fork and maintain.
-# 0: "no daemon" mode, like running "squid -N ..."
-# 1: "no SMP" mode, start one main Squid process daemon (default)
-# N: start N main Squid process daemons (i.e., SMP mode)
-#
-# In SMP mode, each worker does nearly everything a single Squid daemon
-# does (e.g., listen on http_port and forward HTTP requests).
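-#
-#Example (illustrative only, not part of the stock config; the value is
-# an assumption for a 4-core host, see also cpu_affinity_map below):
-# workers 4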
-#Default:
-# SMP support disabled.
-
-# TAG: cpu_affinity_map
-# Usage: cpu_affinity_map process_numbers=P1,P2,... cores=C1,C2,...
-#
-# Sets 1:1 mapping between Squid processes and CPU cores. For example,
-#
-# cpu_affinity_map process_numbers=1,2,3,4 cores=1,3,5,7
-#
-# affects processes 1 through 4 only and places them on every other
-# core (cores 1, 3, 5 and 7), starting with core #1.
-#
-# CPU cores are numbered starting from 1. Requires support for
-# sched_getaffinity(2) and sched_setaffinity(2) system calls.
-#
-# Multiple cpu_affinity_map options are merged.
-#
-# See also: workers
-#Default:
-# Let operating system decide.
-
diff --git a/ansible/roles/squid/files/squid.conf.org.diff b/ansible/roles/squid/files/squid.conf.org.diff
deleted file mode 100644
index b07d1a2..0000000
--- a/ansible/roles/squid/files/squid.conf.org.diff
+++ /dev/null
@@ -1,41 +0,0 @@
-This is a patch against the default config of squid3-3.3.8-1ubuntu6 (as
-shipped with Ubuntu 14.04). As it is not immediately clear whether Squid
-supports a conf.d scheme for adding just the needed changes, we actually
-deploy the complete config with these changes, and this diff is provided
-just for reference.
-
-TODO: Figure out the above.
-
---- squid.conf.org 2014-06-07 17:19:49.480115826 +0300
-+++ squid.conf 2014-06-07 17:19:36.800109594 +0300
-@@ -898,11 +898,12 @@
- # Example rule allowing access from your local networks.
- # Adapt to list your (internal) IP networks from where browsing
- # should be allowed
--#acl localnet src 10.0.0.0/8 # RFC1918 possible internal network
-+acl localnet src 10.0.0.0/8 # RFC1918 possible internal network
- #acl localnet src 172.16.0.0/12 # RFC1918 possible internal network
- #acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
- #acl localnet src fc00::/7 # RFC 4193 local private network range
- #acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
-+acl safe_dst_host dstdomain .linaro.org .kernel.org security.ubuntu.com private-ppa.launchpad.net ppa.launchpad.net .archive.ubuntu.com ports.ubuntu.com .googlesource.com
-
- acl SSL_ports port 443
- acl Safe_ports port 80 # http
-@@ -1053,7 +1054,8 @@
- # Example rule allowing access from your local networks.
- # Adapt localnet in the ACL section to list your (internal) IP networks
- # from where browsing should be allowed
--#http_access allow localnet
-+http_access deny !safe_dst_host
-+http_access allow localnet
- http_access allow localhost
-
- # And finally deny all other access to this proxy
-@@ -2998,6 +3000,7 @@
-
- # Uncomment and adjust the following to add a disk cache directory.
- #cache_dir ufs /var/spool/squid3 100 16 256
-+cache_dir ufs /mnt/spool/squid/ 30000 16 256
-
- # TAG: store_dir_select_algorithm
- # How Squid selects which cache_dir to use when the response
diff --git a/ansible/roles/squid/tasks/main.yml b/ansible/roles/squid/tasks/main.yml
deleted file mode 100644
index e9c2361..0000000
--- a/ansible/roles/squid/tasks/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-- name: Install Squid
- apt: pkg={{item}}
- sudo: yes
- with_items:
- - squid
- when: hosttype == "ci"
-
-- name: Install Squid config
- copy: src=squid.conf dest=/etc/squid3/squid.conf
- sudo: yes
- when: hosttype == "ci"
diff --git a/ansible/roles/sslcert/tasks/main.yml b/ansible/roles/sslcert/tasks/main.yml
deleted file mode 100644
index f25a90c..0000000
--- a/ansible/roles/sslcert/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-- include: self-signed.yml
- when: ssl_cert_self_signed
-- include: production.yml
- when: not ssl_cert_self_signed
-
-- name: Copy Go Daddy certificate bundle
- copy: src=files/gd_bundle.crt dest=/etc/ssl/certs/gd_bundle.crt mode=0644
- sudo: yes
diff --git a/ansible/roles/sslcert/tasks/production.yml b/ansible/roles/sslcert/tasks/production.yml
deleted file mode 100644
index 859e011..0000000
--- a/ansible/roles/sslcert/tasks/production.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: Copy production SSL key
- copy: src={{secrets_dir}}/ssl/{{inventory_hostname}}.key dest={{ssl_key}} mode=600 backup=yes
- sudo: yes
- notify:
- - Restart Apache
-- name: Copy production SSL cert
- copy: src={{secrets_dir}}/ssl/{{inventory_hostname}}.crt dest={{ssl_cert}} mode=600 backup=yes
- sudo: yes
- notify:
- - Restart Apache
diff --git a/ansible/roles/sslcert/tasks/self-signed.yml b/ansible/roles/sslcert/tasks/self-signed.yml
deleted file mode 100644
index eaf555e..0000000
--- a/ansible/roles/sslcert/tasks/self-signed.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Prepare OpenSSL config
- template: src=ssleay.conf dest=/tmp/
-- name: Create self-signed SSL certificate
- command: openssl req -config /tmp/ssleay.conf -new -x509 -days 3650 -nodes -out {{ssl_cert}} -keyout {{ssl_key}}
- creates={{ssl_cert}}
- sudo: yes
- notify:
- - Restart Apache
-- name: Set permissions on certificate
- file: path={{ssl_cert}} mode=600
- sudo: yes
-- name: Set permissions on certificate key
- file: path={{ssl_key}} mode=600
- sudo: yes
diff --git a/ansible/roles/sslcert/templates/ssleay.conf b/ansible/roles/sslcert/templates/ssleay.conf
deleted file mode 100644
index f333d5e..0000000
--- a/ansible/roles/sslcert/templates/ssleay.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-RANDFILE = /dev/urandom
-[req]
-default_bits = 1024
-default_keyfile = privkey.pem
-distinguished_name = req_distinguished_name
-prompt = no
-policy = policy_anything
-[req_distinguished_name]
-commonName = {{site_name}}
diff --git a/ansible/secrets b/ansible/secrets
deleted file mode 120000
index b0e0a52..0000000
--- a/ansible/secrets
+++ /dev/null
@@ -1 +0,0 @@
-../../shared-credentials/ansible/jenkins-servers \ No newline at end of file
diff --git a/ansible/site.yml b/ansible/site.yml
deleted file mode 100644
index bd2a790..0000000
--- a/ansible/site.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-# Main site file - deploys everything
----
-- include: jenkins.yml
-- include: frontend.yml
-- include: reposeed.yml
diff --git a/ansible/support/jenkins_plugins2ansible.py b/ansible/support/jenkins_plugins2ansible.py
deleted file mode 100644
index ac6107f..0000000
--- a/ansible/support/jenkins_plugins2ansible.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Feed https://tcwg.ci.linaro.org/pluginManager/installed into this script
-import sys
-
-import lxml
-import lxml.etree
-import lxml.html
-from lxml.cssselect import CSSSelector
-from lxml.html import tostring
-
-tree = lxml.html.parse(sys.argv[1])
-root = tree.getroot()
-
-def innerhtml(el):
- return ''.join([tostring(child) for child in el.iterchildren()])
-
-for r in root.cssselect("table#plugins")[0].cssselect("tr"):
- tds = r.cssselect("td")
- if not tds:
- continue
- a = tds[2].getchildren()[0]
- plugin = a.get("href").split("/")[1]
- mark = ""
- if plugin in ("credentials", "ssh-credentials"):
- mark = "a"
- ver = a.text_content()
- print " - %shttp://updates.jenkins-ci.org/download/plugins/%s/%s/%s.hpi" % (mark, plugin, ver, plugin)
diff --git a/ansible/update-production-jenkins.sh b/ansible/update-production-jenkins.sh
deleted file mode 100755
index 5ba8bf0..0000000
--- a/ansible/update-production-jenkins.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-echo "========================================================"
-echo "This will upgrade Jenkins and Jenkins plugins to the versions"
-echo "as maitained in Ansible configuration."
-echo "1. This means DOWNTIME for a Jenkins server, all running builds"
-echo " will be ABORTED."
-echo "2. This may lead to DATA LOSS if you make unvalidated upgrade."
-echo "3. This may lead to IRRECOVERABLE SERVICE LOSS if you mis-type"
-echo " or mis-think something."
-echo "========================================================"
-
-if [ "$#" -ne 2 ]; then
- echo "Usage: $0 <jenkins-host> <Linaro.account>"
- echo "where <jenkins-host> is one of ci.linaro.org, rdk-ci.linaro.org, etc."
- echo "(see hosts-prod for complete list)"
- exit 1
-fi
-
-echo
-echo "Please read the warning above and type YES if you are sure you want to proceed."
-
-read input
-
-if [ "$input" != "YES" ]; then
- echo "Operation cancelled"
- exit 0
-fi
-
-time ansible-playbook -i hosts-prod -l "$1" --user "$2" --ask-sudo-pass jenkins.yml --tags jenkins-install
diff --git a/ansible/vars/empty.yml b/ansible/vars/empty.yml
deleted file mode 100644
index ea367b0..0000000
--- a/ansible/vars/empty.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-__none__: __none__
-smtpAuthPassword: