Browse Source

Initial commit

Eliezer Croitoru 11 months ago
commit
470d52f77a

+ 113 - 0
Makefile

@@ -0,0 +1,113 @@
+# Makefile driving the CentOS 7 squid 3.5 RPM build pipeline:
+# fetch the packaging repo (gitfetch), then build RPMs via run.sh.
+TOPDIR = $(shell pwd)
+# Bug fix: DATE was the literal string "date +%Y%m%d"; run the command
+# through $(shell ...) and use := so it is evaluated once at parse time.
+DATE := $(shell date +%Y%m%d)
+PROGRAMNAME=helloworld
+RELEASE=0.0.1
+TMPDIR=/tmp
+BUILDDIR=build
+#PATH=$(shell printenv CCACHE_PATH):$(shell printenv PATH)
+#PATH=/usr/bin:$(shell printenv PATH)
+#PATH = $(PATH):/other/dir
+#PATH=$(shell printenv PATH)
+
+# None of these targets produce a file of the same name, so declare them
+# phony; otherwise a stray file called e.g. "clean" would break them.
+.PHONY: all build clean clean_hard clean_harder clean_hardest install \
+        install_hard install_harder install_hardest install_rpm restart \
+        recombuild clean_rpms sdist new-rpms pychecker pyflakes money \
+        testit unittest buildrpms deployrpms gitfetch crlfclean
+
+all: buildrpms
+
+# Bug fix: "buildrpms" is a make target, not a shell command, so the
+# recipe must recurse via $(MAKE) (the bare word failed at run time with
+# "command not found"; trailing whitespace also removed).
+build: gitfetch
+	$(MAKE) buildrpms
+
+#build: clean
+#    echo $(TOPDIR)
+#    echo "- Create Changelog file"
+#    git shortlog > changelog.txt
+#    echo "- Create new $(TMPDIR)/$(BUILDDIR)"
+#    mkdir -p $(TMPDIR)/$(BUILDDIR)
+#    mkdir -p $(TMPDIR)/$(BUILDDIR)/$(PROGRAMNAME)
+#    echo "- Copy existing Kermit sources"
+#    rsync -raC --exclude .git . $(TMPDIR)/$(BUILDDIR)/$(PROGRAMNAME)
+#    echo "- Remove useless files"
+#    rm -Rf $(TMPDIR)/$(BUILDDIR)/$(PROGRAMNAME)/src/sqlite.db
+#    echo "- Rename $(PROGRAMNAME) in $(PROGRAMNAME)-$(RELEASE)"
+#    #    mv $(TMPDIR)/$(BUILDDIR)/$(PROGRAMNAME) $(TMPDIR)/$(BUILDDIR)/$(PROGRAMNAME)-$(RELEASE)
+#    echo "- Compressing $(PROGRAMNAME) directory"
+#    tar -czf $(PROGRAMNAME)-$(RELEASE).tar.gz -C $(TMPDIR)/$(BUILDDIR) $(PROGRAMNAME)/
+#    echo "- Moving source package in dist dir"
+#    mkdir -p ./dist
+#    mv $(PROGRAMNAME)-$(RELEASE).tar.gz ./dist
+
+clean:
+#    -rm -rf dist/
+#    -rm -rf rpm-build/
+#    -rm -rf $(TMPDIR)/$(BUILDDIR)
+
+clean_hard:
+
+clean_harder:
+
+clean_hardest: clean_rpms
+
+# NOTE(review): "manpage" (and "rpms"/"bumprelease" below) have no rules
+# in this file, so these targets fail if invoked — confirm whether they
+# are leftovers from the template this Makefile was copied from.
+install: build manpage
+
+install_hard: clean_hard install
+
+install_harder: clean_harder install
+
+install_hardest: clean_harder clean_rpms rpms install_rpm restart
+
+install_rpm:
+
+restart:
+
+recombuild: install_harder restart
+
+clean_rpms:
+#    -rpm -e kermit-webui
+
+sdist: messages
+
+new-rpms: bumprelease rpms
+
+pychecker:
+
+pyflakes:
+
+money: clean
+
+#async: install
+#    /sbin/service httpd restart
+
+testit: clean
+
+unittest:
+
+# Real build entry point: run.sh performs the rpmbuild steps sketched in
+# the commented block below.
+buildrpms:
+	/usr/bin/bash run.sh
+#    mkdir -p rpm-build
+#    cp dist/*.gz rpm-build/
+#    rpmbuild --define "_topdir %(pwd)/rpm-build" 
+#    --define "_builddir %{_topdir}" 
+#    --define "_rpmdir %{_topdir}" 
+#    --define "_srcrpmdir %{_topdir}" 
+#    --define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' 
+#   --define "_specdir %{_topdir}" 
+#   --define "_sourcedir  %{_topdir}" 
+#   --define "vendor Think" 
+#   -ba misc/specs/helloworld.spec
+#
+deployrpms:
+	/usr/bin/bash deploy.sh
+
+# Re-clone the packaging repository into ./build from scratch.
+gitfetch:
+	rm -rf clone
+	/usr/bin/git clone git@gitlab.ngtech.local:elico/squid-centos7-squid35-rpms.git clone
+	mv clone build
+# Bug fix: the bare "cd build" recipe line was a no-op — every recipe
+# line runs in its own shell. Chain "cd build && <cmd>" when needed.
+#	rm -fr .git
+#	/usr/bin/git init
+#	/usr/bin/git remote add origin git@gitlab.ngtech.local:elico/squid-centos7-squid35-rpms.git
+#	/usr/bin/git fetch
+#	/usr/bin/git reset --mixed origin/master
+#	mv squid-centos7-squid35-rpms.git build
+#git.sh -i ~/.ssh/thatuserkey.pem clone thatuser@myserver.com:/git/repo.git
+
+# Strip CRLF line endings that Windows checkouts leave in patches/specs.
+crlfclean:
+	sed -i 's/\r//g' SOURCES/*.patch
+	sed -i 's/\r//g' SPECS/*.spec

+ 10 - 0
README.md

@@ -0,0 +1,10 @@
+## This repo is for CentOS 7 x64
+RHEL jenkins install instructions at: https://wiki.jenkins-ci.org/display/JENKINS/Installing+Jenkins+on+Red+Hat+distributions
+
+```
+sudo wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo
+sudo rpm --import https://jenkins-ci.org/redhat/jenkins-ci.org.key
+sudo yum install jenkins
+sudo yum search openjdk headless
+# install the right one
+```

+ 682 - 0
SOURCES/assertion_FwdState.cc_serverConnection_eq_conn-trunk-t4-squid4.patch

@@ -0,0 +1,682 @@
+Bug 4447: FwdState.cc:447 "serverConnection() == conn" assertion
+
+After certain failures, FwdState::retryOrBail() may be called twice,
+once from FwdState::unregisterdServerEnd() [called from
+HttpStateData::swanSong()] and once from the FwdState's own connection
+close handler. This may result in two concurrent connections to the
+remote server, followed by an assertion upon a connection closure.
+
+This patch:
+
+ - After HttpStateData failures, instead of closing the squid-to-peer
+   connection directly (and, hence, triggering closure handlers), calls
+   HttpStateData::closeServer() and mustStop() for a cleaner exit with
+   fewer wasteful side effects and better debugging.
+
+ - Creates and remembers a FwdState close handler AsyncCall so that
+   comm_remove_close_handler() can cancel an already scheduled callback.
+   The conversion to the AsyncCall was necessary because legacy [close
+   handler callbacks] cannot be canceled once scheduled.
+
+This is a Measurement Factory project.
+
+=== modified file 'src/FwdState.cc'
+--- src/FwdState.cc	2016-02-13 05:44:58 +0000
++++ src/FwdState.cc	2016-02-27 11:06:44 +0000
+@@ -106,41 +106,42 @@
+ 
+ void
+ FwdState::abort(void* d)
+ {
+     FwdState* fwd = (FwdState*)d;
+     Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
+ 
+     if (Comm::IsConnOpen(fwd->serverConnection())) {
+         fwd->closeServerConnection("store entry aborted");
+     } else {
+         debugs(17, 7, HERE << "store entry aborted; no connection to close");
+     }
+     fwd->serverDestinations.clear();
+     fwd->self = NULL;
+ }
+ 
+ void
+ FwdState::closeServerConnection(const char *reason)
+ {
+     debugs(17, 3, "because " << reason << "; " << serverConn);
+-    comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
++    comm_remove_close_handler(serverConn->fd, closeHandler);
++    closeHandler = NULL;
+     fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
+     serverConn->close();
+ }
+ 
+ /**** PUBLIC INTERFACE ********************************************************/
+ 
+ FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
+     entry(e),
+     request(r),
+     al(alp),
+     err(NULL),
+     clientConn(client),
+     start_t(squid_curtime),
+     n_tries(0),
+     pconnRace(raceImpossible)
+ {
+     debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
+     HTTPMSGLOCK(request);
+     serverDestinations.reserve(Config.forward_max_tries);
+     e->lock("FwdState");
+@@ -439,41 +440,42 @@
+         debugs(17, 5, HERE << "pconn race happened");
+         pconnRace = raceHappened;
+     }
+ 
+     if (ConnStateData *pinned_connection = request->pinnedConnection()) {
+         pinned_connection->pinning.zeroReply = true;
+         flags.dont_retry = true; // we want to propagate failure to the client
+         debugs(17, 4, "zero reply on pinned connection");
+     }
+ }
+ 
+ /**
+  * Frees fwdState without closing FD or generating an abort
+  */
+ void
+ FwdState::unregister(Comm::ConnectionPointer &conn)
+ {
+     debugs(17, 3, HERE << entry->url() );
+     assert(serverConnection() == conn);
+     assert(Comm::IsConnOpen(conn));
+-    comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
++    comm_remove_close_handler(conn->fd, closeHandler);
++    closeHandler = NULL;
+     serverConn = NULL;
+ }
+ 
+ // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
+ void
+ FwdState::unregister(int fd)
+ {
+     debugs(17, 3, HERE << entry->url() );
+     assert(fd == serverConnection()->fd);
+     unregister(serverConn);
+ }
+ 
+ /**
+  * FooClient modules call fwdComplete() when they are done
+  * downloading an object.  Then, we either 1) re-forward the
+  * request somewhere else if needed, or 2) call storeComplete()
+  * to finish it off
+  */
+ void
+ FwdState::complete()
+@@ -670,41 +672,41 @@
+ {
+     if (status != Comm::OK) {
+         ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
+         anErr->xerrno = xerrno;
+         fail(anErr);
+ 
+         /* it might have been a timeout with a partially open link */
+         if (conn != NULL) {
+             if (conn->getPeer())
+                 peerConnectFailed(conn->getPeer());
+ 
+             conn->close();
+         }
+         retryOrBail();
+         return;
+     }
+ 
+     serverConn = conn;
+     debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
+ 
+-    comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
++    closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
+ 
+ #if USE_OPENSSL
+     if (!request->flags.pinned) {
+         const CachePeer *p = serverConnection()->getPeer();
+         const bool peerWantsTls = p && p->secure.encryptTransport;
+         // userWillTlsToPeerForUs assumes CONNECT == HTTPS
+         const bool userWillTlsToPeerForUs = p && p->options.originserver &&
+                                             request->method == Http::METHOD_CONNECT;
+         const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
+         const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS;
+         if (needTlsToPeer || needTlsToOrigin || request->flags.sslPeek) {
+             HttpRequest::Pointer requestPointer = request;
+             AsyncCall::Pointer callback = asyncCall(17,4,
+                                                     "FwdState::ConnectedToPeer",
+                                                     FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
+             // Use positive timeout when less than one second is left.
+             const time_t sslNegotiationTimeout = max(static_cast<time_t>(1), timeLeft());
+             Ssl::PeerConnector *connector = NULL;
+             if (request->flags.sslPeek)
+                 connector = new Ssl::PeekingPeerConnector(requestPointer, serverConnection(), clientConn, callback, al, sslNegotiationTimeout);
+@@ -848,80 +850,81 @@
+     }
+ 
+     request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
+     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
+     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
+     if (serverDestinations[0]->peerType == PINNED) {
+         ConnStateData *pinned_connection = request->pinnedConnection();
+         debugs(17,7, "pinned peer connection: " << pinned_connection);
+         // pinned_connection may become nil after a pconn race
+         if (pinned_connection)
+             serverConn = pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer());
+         else
+             serverConn = NULL;
+         if (Comm::IsConnOpen(serverConn)) {
+             pinned_connection->stopPinnedConnectionMonitoring();
+             flags.connected_okay = true;
+             ++n_tries;
+             request->flags.pinned = true;
+             if (pinned_connection->pinnedAuth())
+                 request->flags.auth = true;
+-            comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
++
++            closeHandler = comm_add_close_handler(serverConn->fd,  fwdServerClosedWrapper, this);
+ 
+             syncWithServerConn(pinned_connection->pinning.host);
+ 
+             // the server may close the pinned connection before this request
+             pconnRace = racePossible;
+             dispatch();
+             return;
+         }
+         // Pinned connection failure.
+         debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
+         ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
+         fail(anErr);
+         self = NULL; // refcounted
+         return;
+     }
+ 
+     // Use pconn to avoid opening a new connection.
+     const char *host = NULL;
+     if (!serverDestinations[0]->getPeer())
+         host = request->url.host();
+ 
+     Comm::ConnectionPointer temp;
+     // Avoid pconns after races so that the same client does not suffer twice.
+     // This does not increase the total number of connections because we just
+     // closed the connection that failed the race. And re-pinning assumes this.
+     if (pconnRace != raceHappened)
+         temp = pconnPop(serverDestinations[0], host);
+ 
+     const bool openedPconn = Comm::IsConnOpen(temp);
+     pconnRace = openedPconn ? racePossible : raceImpossible;
+ 
+     // if we found an open persistent connection to use. use it.
+     if (openedPconn) {
+         serverConn = temp;
+         flags.connected_okay = true;
+         debugs(17, 3, HERE << "reusing pconn " << serverConnection());
+         ++n_tries;
+ 
+-        comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
++        closeHandler = comm_add_close_handler(serverConnection()->fd,  fwdServerClosedWrapper, this);
+ 
+         syncWithServerConn(request->url.host());
+ 
+         dispatch();
+         return;
+     }
+ 
+     // We will try to open a new connection, possibly to the same destination.
+     // We reset serverDestinations[0] in case we are using it again because
+     // ConnOpener modifies its destination argument.
+     serverDestinations[0]->local.port(0);
+     serverConn = NULL;
+ 
+ #if URL_CHECKSUM_DEBUG
+     entry->mem_obj->checkUrlChecksum();
+ #endif
+ 
+     GetMarkingsToServer(request, *serverDestinations[0]);
+ 
+     calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
+
+=== modified file 'src/FwdState.h'
+--- src/FwdState.h	2016-01-25 08:26:32 +0000
++++ src/FwdState.h	2016-02-27 11:06:44 +0000
+@@ -136,29 +136,31 @@
+     Comm::ConnectionPointer clientConn;        ///< a possibly open connection to the client.
+     time_t start_t;
+     int n_tries;
+ 
+     // AsyncCalls which we set and may need cancelling.
+     struct {
+         AsyncCall::Pointer connector;  ///< a call linking us to the ConnOpener producing serverConn.
+     } calls;
+ 
+     struct {
+         bool connected_okay; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
+         bool dont_retry;
+         bool forward_completed;
+     } flags;
+ 
+     /** connections to open, in order, until successful */
+     Comm::ConnectionList serverDestinations;
+ 
+     Comm::ConnectionPointer serverConn; ///< a successfully opened connection to a server.
+ 
++    AsyncCall::Pointer closeHandler; ///< The serverConn close handler
++
+     /// possible pconn race states
+     typedef enum { raceImpossible, racePossible, raceHappened } PconnRace;
+     PconnRace pconnRace; ///< current pconn race state
+ };
+ 
+ void getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn);
+ 
+ #endif /* SQUID_FORWARD_H */
+ 
+
+=== modified file 'src/clients/Client.h'
+--- src/clients/Client.h	2016-02-19 21:23:08 +0000
++++ src/clients/Client.h	2016-02-27 11:06:44 +0000
+@@ -87,41 +87,43 @@
+     void serverComplete2();    /**< Continuation of serverComplete */
+     bool completed;            /**< serverComplete() has been called */
+ 
+ protected:
+     // kids customize these
+     virtual void haveParsedReplyHeaders(); /**< called when got final headers */
+     virtual void completeForwarding(); /**< default calls fwd->complete() */
+ 
+     // BodyConsumer for HTTP: consume request body.
+     bool startRequestBodyFlow();
+     void handleMoreRequestBodyAvailable();
+     void handleRequestBodyProductionEnded();
+     virtual void handleRequestBodyProducerAborted() = 0;
+ 
+     // sending of the request body to the server
+     void sendMoreRequestBody();
+     // has body; kids overwrite to increment I/O stats counters
+     virtual void sentRequestBody(const CommIoCbParams &io) = 0;
+     virtual void doneSendingRequestBody() = 0;
+ 
+-    virtual void closeServer() = 0;            /**< end communication with the server */
++    /// Use this to end communication with the server. The call cancels our
++    /// closure handler and tells FwdState to forget about the connection.
++    virtual void closeServer() = 0;
+     virtual bool doneWithServer() const = 0;   /**< did we end communication? */
+     /// whether we may receive more virgin response body bytes
+     virtual bool mayReadVirginReplyBody() const = 0;
+ 
+     /// Entry-dependent callbacks use this check to quit if the entry went bad
+     bool abortOnBadEntry(const char *abortReason);
+ 
+     bool blockCaching();
+ 
+ #if USE_ADAPTATION
+     void startAdaptation(const Adaptation::ServiceGroupPointer &group, HttpRequest *cause);
+     void adaptVirginReplyBody(const char *buf, ssize_t len);
+     void cleanAdaptation();
+     virtual bool doneWithAdaptation() const;   /**< did we end ICAP communication? */
+ 
+     // BodyConsumer for ICAP: consume adapted response body.
+     void handleMoreAdaptedBodyAvailable();
+     void handleAdaptedBodyProductionEnded();
+     void handleAdaptedBodyProducerAborted();
+ 
+
+=== modified file 'src/comm.cc'
+--- src/comm.cc	2016-01-26 21:02:00 +0000
++++ src/comm.cc	2016-02-27 11:06:44 +0000
+@@ -955,49 +955,50 @@
+     struct addrinfo *AI = NULL;
+     to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
+     int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
+     Ip::Address::FreeAddr(AI);
+ 
+     PROF_stop(comm_udp_sendto);
+ 
+     if (x >= 0)
+         return x;
+ 
+ #if _SQUID_LINUX_
+ 
+     if (ECONNREFUSED != errno)
+ #endif
+ 
+         debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
+ 
+     return Comm::COMM_ERROR;
+ }
+ 
+-void
++AsyncCall::Pointer
+ comm_add_close_handler(int fd, CLCB * handler, void *data)
+ {
+     debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
+            handler << ", data=" << data);
+ 
+     AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
+                                        CommCloseCbPtrFun(handler, data));
+     comm_add_close_handler(fd, call);
++    return call;
+ }
+ 
+ void
+ comm_add_close_handler(int fd, AsyncCall::Pointer &call)
+ {
+     debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
+ 
+     /*TODO:Check for a similar scheduled AsyncCall*/
+ //    for (c = fd_table[fd].closeHandler; c; c = c->next)
+ //        assert(c->handler != handler || c->data != data);
+ 
+     call->setNext(fd_table[fd].closeHandler);
+ 
+     fd_table[fd].closeHandler = call;
+ }
+ 
+ // remove function-based close handler
+ void
+ comm_remove_close_handler(int fd, CLCB * handler, void *data)
+ {
+
+=== modified file 'src/comm.h'
+--- src/comm.h	2016-01-01 00:12:18 +0000
++++ src/comm.h	2016-02-27 11:06:44 +0000
+@@ -62,41 +62,41 @@
+ int comm_openex(int, int, Ip::Address &, int, const char *);
+ unsigned short comm_local_port(int fd);
+ 
+ int comm_udp_sendto(int sock, const Ip::Address &to, const void *buf, int buflen);
+ void commCallCloseHandlers(int fd);
+ 
+ /// clear a timeout handler by FD number
+ void commUnsetFdTimeout(int fd);
+ 
+ /**
+  * Set or clear the timeout for some action on an active connection.
+  * API to replace commSetTimeout() when a Comm::ConnectionPointer is available.
+  */
+ int commSetConnTimeout(const Comm::ConnectionPointer &conn, int seconds, AsyncCall::Pointer &callback);
+ int commUnsetConnTimeout(const Comm::ConnectionPointer &conn);
+ 
+ int ignoreErrno(int);
+ void commCloseAllSockets(void);
+ void checkTimeouts(void);
+ 
+-void comm_add_close_handler(int fd, CLCB *, void *);
++AsyncCall::Pointer comm_add_close_handler(int fd, CLCB *, void *);
+ void comm_add_close_handler(int fd, AsyncCall::Pointer &);
+ void comm_remove_close_handler(int fd, CLCB *, void *);
+ void comm_remove_close_handler(int fd, AsyncCall::Pointer &);
+ 
+ int comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from);
+ int comm_udp_recv(int fd, void *buf, size_t len, int flags);
+ ssize_t comm_udp_send(int s, const void *buf, size_t len, int flags);
+ bool comm_has_incomplete_write(int);
+ 
+ /** The read channel has closed and the caller does not expect more data
+  * but needs to detect connection aborts. The current detection method uses
+  * 0-length reads: We read until the error occurs or the writer closes
+  * the connection. If there is a read error, we close the connection.
+  */
+ void commStartHalfClosedMonitor(int fd);
+ bool commHasHalfClosedMonitor(int fd);
+ // XXX: remove these wrappers which minimize client_side.cc changes in a commit
+ inline void commMarkHalfClosed(int fd) { commStartHalfClosedMonitor(fd); }
+ inline bool commIsHalfClosed(int fd) { return commHasHalfClosedMonitor(fd); }
+ 
+
+=== modified file 'src/http.cc'
+--- src/http.cc	2016-02-19 21:23:08 +0000
++++ src/http.cc	2016-02-27 11:09:07 +0000
+@@ -149,41 +149,42 @@
+     return serverConnection;
+ }
+ 
+ void
+ HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
+ {
+     debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
+     doneWithFwd = "httpStateConnClosed()"; // assume FwdState is monitoring too
+     mustStop("HttpStateData::httpStateConnClosed");
+ }
+ 
+ void
+ HttpStateData::httpTimeout(const CommTimeoutCbParams &)
+ {
+     debugs(11, 4, serverConnection << ": '" << entry->url() << "'");
+ 
+     if (entry->store_status == STORE_PENDING) {
+         fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, fwd->request));
+     }
+ 
+-    serverConnection->close();
++    closeServer();
++    mustStop("HttpStateData::httpTimeout");
+ }
+ 
+ /// Remove an existing public store entry if the incoming response (to be
+ /// stored in a currently private entry) is going to invalidate it.
+ static void
+ httpMaybeRemovePublic(StoreEntry * e, Http::StatusCode status)
+ {
+     int remove = 0;
+     int forbidden = 0;
+     StoreEntry *pe;
+ 
+     // If the incoming response already goes into a public entry, then there is
+     // nothing to remove. This protects ready-for-collapsing entries as well.
+     if (!EBIT_TEST(e->flags, KEY_PRIVATE))
+         return;
+ 
+     switch (status) {
+ 
+     case Http::scOkay:
+ 
+@@ -1217,42 +1218,42 @@
+             request->hier.peer_response_time.tv_sec = -1;
+     }
+ 
+         /* Continue to process previously read data */
+     break;
+ 
+     case Comm::ENDFILE: // close detected by 0-byte read
+         eof = 1;
+         flags.do_next_read = false;
+ 
+         /* Continue to process previously read data */
+         break;
+ 
+     // case Comm::COMM_ERROR:
+     default: // no other flags should ever occur
+         debugs(11, 2, io.conn << ": read failure: " << xstrerr(rd.xerrno));
+         ErrorState *err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request);
+         err->xerrno = rd.xerrno;
+         fwd->fail(err);
+         flags.do_next_read = false;
+-        io.conn->close();
+-
++        closeServer();
++        mustStop("HttpStateData::readReply");
+         return;
+     }
+ 
+     /* Process next response from buffer */
+     processReply();
+ }
+ 
+ /// processes the already read and buffered response data, possibly after
+ /// waiting for asynchronous 1xx control message processing
+ void
+ HttpStateData::processReply()
+ {
+ 
+     if (flags.handling1xx) { // we came back after handling a 1xx response
+         debugs(11, 5, HERE << "done with 1xx handling");
+         flags.handling1xx = false;
+         Must(!flags.headers_parsed);
+     }
+ 
+     if (!flags.headers_parsed) { // have not parsed headers yet?
+@@ -1318,41 +1319,42 @@
+         } else {
+             // parsed headers but got no reply
+             debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << request->url);
+             error = ERR_INVALID_RESP;
+         }
+     } else {
+         assert(eof);
+         if (inBuf.length()) {
+             error = ERR_INVALID_RESP;
+             debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->url);
+         } else {
+             error = ERR_ZERO_SIZE_OBJECT;
+             debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << request->url);
+         }
+     }
+ 
+     assert(error != ERR_NONE);
+     entry->reset();
+     fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request));
+     flags.do_next_read = false;
+-    serverConnection->close();
++    closeServer();
++    mustStop("HttpStateData::continueAfterParsingHeader");
+     return false; // quit on error
+ }
+ 
+ /** truncate what we read if we read too much so that writeReplyBody()
+     writes no more than what we should have read */
+ void
+ HttpStateData::truncateVirginBody()
+ {
+     assert(flags.headers_parsed);
+ 
+     HttpReply *vrep = virginReply();
+     int64_t clen = -1;
+     if (!vrep->expectingBody(request->method, clen) || clen < 0)
+         return; // no body or a body of unknown size, including chunked
+ 
+     if (payloadSeen - payloadTruncated <= clen)
+         return; // we did not read too much or already took care of the extras
+ 
+     if (const int64_t extras = payloadSeen - payloadTruncated - clen) {
+         // server sent more that the advertised content length
+@@ -1582,69 +1584,69 @@
+ {
+     debugs(11, 5, HERE << serverConnection << ": size " << io.size << ": errflag " << io.flag << ".");
+ #if URL_CHECKSUM_DEBUG
+ 
+     entry->mem_obj->checkUrlChecksum();
+ #endif
+ 
+     if (io.size > 0) {
+         fd_bytes(io.fd, io.size, FD_WRITE);
+         statCounter.server.all.kbytes_out += io.size;
+         statCounter.server.http.kbytes_out += io.size;
+     }
+ 
+     if (io.flag == Comm::ERR_CLOSING)
+         return;
+ 
+     if (io.flag) {
+         ErrorState *err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request);
+         err->xerrno = io.xerrno;
+         fwd->fail(err);
+-        serverConnection->close();
++        closeServer();
++        mustStop("HttpStateData::wroteLast");
+         return;
+     }
+ 
+     sendComplete();
+ }
+ 
+ /// successfully wrote the entire request (including body, last-chunk, etc.)
+ void
+ HttpStateData::sendComplete()
+ {
+     /*
+      * Set the read timeout here because it hasn't been set yet.
+      * We only set the read timeout after the request has been
+      * fully written to the peer.  If we start the timeout
+      * after connection establishment, then we are likely to hit
+      * the timeout for POST/PUT requests that have very large
+      * request bodies.
+      */
+     typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
+     AsyncCall::Pointer timeoutCall =  JobCallback(11, 5,
+                                       TimeoutDialer, this, HttpStateData::httpTimeout);
+ 
+     commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall);
+     flags.request_sent = true;
+     request->hier.peer_http_request_sent = current_time;
+ }
+ 
+-// Close the HTTP server connection. Used by serverComplete().
+ void
+ HttpStateData::closeServer()
+ {
+     debugs(11,5, HERE << "closing HTTP server " << serverConnection << " this " << this);
+ 
+     if (Comm::IsConnOpen(serverConnection)) {
+         fwd->unregister(serverConnection);
+         comm_remove_close_handler(serverConnection->fd, closeHandler);
+         closeHandler = NULL;
+         serverConnection->close();
+     }
+ }
+ 
+ bool
+ HttpStateData::doneWithServer() const
+ {
+     return !Comm::IsConnOpen(serverConnection);
+ }
+ 
+ /*
+@@ -2409,41 +2411,42 @@
+ HttpStateData::handleMoreRequestBodyAvailable()
+ {
+     if (eof || !Comm::IsConnOpen(serverConnection)) {
+         // XXX: we should check this condition in other callbacks then!
+         // TODO: Check whether this can actually happen: We should unsubscribe
+         // as a body consumer when the above condition(s) are detected.
+         debugs(11, DBG_IMPORTANT, HERE << "Transaction aborted while reading HTTP body");
+         return;
+     }
+ 
+     assert(requestBodySource != NULL);
+ 
+     if (requestBodySource->buf().hasContent()) {
+         // XXX: why does not this trigger a debug message on every request?
+ 
+         if (flags.headers_parsed && !flags.abuse_detected) {
+             flags.abuse_detected = true;
+             debugs(11, DBG_IMPORTANT, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request->client_addr << "' -> '" << entry->url() << "'" );
+ 
+             if (virginReply()->sline.status() == Http::scInvalidHeader) {
+-                serverConnection->close();
++                closeServer();
++                mustStop("HttpStateData::handleMoreRequestBodyAvailable");
+                 return;
+             }
+         }
+     }
+ 
+     HttpStateData::handleMoreRequestBodyAvailable();
+ }
+ 
+ // premature end of the request body
+ void
+ HttpStateData::handleRequestBodyProducerAborted()
+ {
+     Client::handleRequestBodyProducerAborted();
+     if (entry->isEmpty()) {
+         debugs(11, 3, "request body aborted: " << serverConnection);
+         // We usually get here when ICAP REQMOD aborts during body processing.
+         // We might also get here if client-side aborts, but then our response
+         // should not matter because either client-side will provide its own or
+         // there will be no response at all (e.g., if the the client has left).
+         ErrorState *err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request);
+

+ 11 - 0
SOURCES/pinger_off_v4.patch

@@ -0,0 +1,11 @@
+--- src/cf.data.pre     2015-08-20 17:44:59.000000000 +0300
++++ /tmp/cf.data.pre.new      2015-08-21 02:06:16.775325134 +0300
+@@ -4948,7 +4948,7 @@
+
+ NAME: pinger_enable
+ TYPE: onoff
+-DEFAULT: on
++DEFAULT: off
+ LOC: IcmpCfg.enable
+ IFDEF: USE_ICMP
+ DOC_START

+ 15 - 0
SOURCES/squid.logrotate

@@ -0,0 +1,15 @@
+/var/log/squid/*.log {
+    weekly
+    rotate 5
+    compress
+    notifempty
+    missingok
+    sharedscripts
+    postrotate
+      # Asks squid to reopen its logs. (log_rotate 0 is set in squid.conf)
+      # errors redirected to make it silent if squid is not running
+      /usr/sbin/squid -k rotate 2>/dev/null
+      # Wait a little to allow Squid to catch up before the logs are compressed
+      sleep 1
+    endscript
+}

+ 5 - 0
SOURCES/squid.nm

@@ -0,0 +1,5 @@
+#!/bin/sh
+# NetworkManager dispatcher hook: $1 is the interface name and $2 the
+# action. Reload squid whenever a link comes up so it can rebind.
+# "|| :" keeps the exit status 0 even when squid is not running.
+if [ "$2" = "up" ]; then
+	/sbin/service squid reload || :
+fi

+ 3 - 0
SOURCES/squid.pam

@@ -0,0 +1,3 @@
+#%PAM-1.0
+auth		include		password-auth
+account		include		password-auth

+ 7 - 0
SOURCES/squid.pam-0.77

@@ -0,0 +1,7 @@
+#%PAM-1.0
+auth       required     pam_stack.so service=system-auth
+auth       required     pam_nologin.so
+account    required     pam_stack.so service=system-auth
+password   required     pam_stack.so service=system-auth
+session    required     pam_stack.so service=system-auth
+session    required     pam_limits.so

+ 28 - 0
SOURCES/squid.service

@@ -0,0 +1,28 @@
+## Copyright (C) 1996-2015 The Squid Software Foundation and contributors
+##
+## Squid software is distributed under GPLv2+ license and includes
+## contributions from numerous individuals and organizations.
+## Please see the COPYING and CONTRIBUTORS files for details.
+##
+
+[Unit]
+Description=Squid Web Proxy Server
+Documentation=man:squid(8)
+After=network.target
+
+[Service]
+MemoryAccounting=true
+CPUAccounting=true
+Type=forking
+LimitNOFILE=16384
+PIDFile=/var/run/squid.pid
+ExecStartPre=/usr/bin/mkdir -p /var/run/squid
+ExecStartPre=/usr/bin/chown squid.squid /var/run/squid
+ExecStart=/usr/sbin/squid -sYC
+ExecReload=/usr/sbin/squid -kreconf
+ExecStop=/usr/sbin/squidshut.sh
+TimeoutStopSec=36
+KillMode=none
+
+[Install]
+WantedBy=multi-user.target

+ 9 - 0
SOURCES/squid.sysconfig

@@ -0,0 +1,9 @@
+# default squid options
+SQUID_OPTS=""
+
+# Time to wait for Squid to shut down when asked. Should not be necessary
+# most of the time.
+SQUID_SHUTDOWN_TIMEOUT=100
+
+# default squid conf file
+SQUID_CONF="/etc/squid/squid.conf"

+ 23 - 0
SOURCES/squidshut.sh

@@ -0,0 +1,23 @@
+#!/usr/bin/env sh
+SQUID_TIMEOUT=34
+SQUID=/usr/sbin/squid
+echo -n $"Stopping Squid: "
+$SQUID -k check >> /var/log/squid/squid.out 2>&1
+RETVAL=$?
+if [ $RETVAL -eq 0 ] ; then
+		$SQUID -k shutdown
+		timeout=0
+		while : ; do
+				[ -f /var/run/squid.pid ] || break
+				if [ $timeout -ge $SQUID_TIMEOUT ]; then
+						echo "Squid shutdown timeout ran out"
+						exit 1
+				fi
+				sleep 2 && echo -n "."
+				timeout=$((timeout+2))
+		done
+		echo "Finished shutting down squid"
+else
+		echo "Squid settings file failed the check"
+fi
+exit $RETVAL

+ 79 - 0
SOURCES/suspendbyoptionsonly.patch

@@ -0,0 +1,79 @@
+=== modified file 'src/adaptation/ServiceConfig.cc'
+--- src/adaptation/ServiceConfig.cc	2016-01-07 05:21:47.000000000 +0200
++++ src/adaptation/ServiceConfig.cc	2016-01-10 16:30:54.654842542 +0200
+@@ -114,6 +114,8 @@
+         bool grokked = false;
+         if (strcmp(name, "bypass") == 0) {
+             grokked = grokBool(bypass, name, value);
++        } else if (strcmp(name, "suspend-by-options-fail-only") == 0) {
++            grokked = grokBool(suspendbyoptionsonly, name, value);
+         } else if (strcmp(name, "routing") == 0)
+             grokked = grokBool(routing, name, value);
+         else if (strcmp(name, "uri") == 0)
+@@ -148,7 +148,7 @@
+     debugs(3,5, cfg_filename << ':' << config_lineno << ": " <<
+            "adaptation_service " << key << ' ' <<
+            methodStr() << "_" << vectPointStr() << ' ' <<
+-           bypass << routing << ' ' <<
++           bypass << routing << suspendbyoptionsonly << ' ' <<
+            uri);
+ 
+     return true;
+=== modified file 'src/adaptation/ServiceConfig.h'
+--- src/adaptation/ServiceConfig.h	2016-01-07 05:21:47.000000000 +0200
++++ src/adaptation/ServiceConfig.h	2016-01-10 02:56:50.262544254 +0200
+@@ -40,6 +40,7 @@
+     Method method;   // what is being adapted (REQMOD vs RESPMOD)
+     VectPoint point; // where the adaptation happens (pre- or post-cache)
+     bool bypass;
++    bool suspendbyoptionsonly;
+ 
+     // options
+     long maxConn; ///< maximum number of concurrent service transactions
+
+=== modified file 'src/adaptation/icap/ServiceRep.cc'
+--- src/adaptation/icap/ServiceRep.cc	2016-01-07 05:21:47.000000000 +0200
++++ src/adaptation/icap/ServiceRep.cc	2016-01-10 03:08:43.983427319 +0200
+@@ -74,6 +74,12 @@
+ 
+ void Adaptation::Icap::ServiceRep::noteFailure()
+ {
+    // Experimental feature
++    if (cfg().suspendbyoptionsonly) {
++        debugs(93,4, HERE << " failure was bypassed by \"suspendbyoptionsonly\" ON state" << status());
++        return;
++    }
++
+     const int failures = theSessionFailures.count(1);
+     debugs(93,4, HERE << " failure " << failures << " out of " <<
+            TheConfig.service_failure_limit << " allowed in " <<
+@@ -515,6 +521,9 @@
+     if (wasAnnouncedUp == up()) // no significant changes to announce
+         return;
+ 
++    if (cfg().suspendbyoptionsonly)
++	return;
++
+     const char *what = cfg().bypass ? "optional" : "essential";
+     const char *state = wasAnnouncedUp ? downPhrase : "up";
+     const int level = important ? 1 :2;
+
+=== modified file 'src/cf.data.pre'
+--- src/cf.data.pre	2016-01-10 03:14:08.826012175 +0200
++++ src/cf.data.pre	2016-01-10 03:14:13.944045332 +0200
+@@ -8330,6 +8330,14 @@
+ 
+ 		Bypass is off by default: services are treated as essential.
+ 
++	suspend-by-options-fail-only=on|off|1|0
+		Experimental: If set to 'on' or '1', the ICAP service state 
++		will remain static between the scheduled OPTIONS probes.
++
++		suspend-by-options-fail-only is off by default: the state of 
++		the service will be changed to UP or DOWN by the icap failure 
++		basic settings.
++
+ 	routing=on|off|1|0
+ 		If set to 'on' or '1', the ICAP service is allowed to
+ 		dynamically change the current message adaptation plan by
+

+ 383 - 0
SPECS/squid4beta.spec

@@ -0,0 +1,383 @@
+%define release_number %(echo $RELEASE_NUMBER)
+%define version_number %(echo $SOURCES_VERSION)
+
+Name:     squid
+Version:  %{version_number}
+Release:  %{release_number}%{?dist}
+Summary:  The Squid proxy caching server
+Epoch:    7
+Packager: Eliezer Croitoru <eliezer@ngtech.co.il>
+Vendor:   NgTech Ltd
+# See CREDITS for breakdown of non GPLv2+ code
+License:  GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
+Group:    System Environment/Daemons
+URL:      http://www.squid-cache.org
+Source0:  http://www.squid-cache.org/Versions/v4/squid-%{version}.tar.xz
+Source1:  http://www.squid-cache.org/Versions/v4/squid-%{version}.tar.xz.asc
+Source2:  squid.service
+Source3:  squid.logrotate
+Source4:  squid.sysconfig
+Source5:  squid.pam
+Source6:  squid.nm
+Source7:  squidshut.sh
+Patch0:   pinger_off_v4.patch
+Patch1:   suspendbyoptionsonly.patch
+#Patch2:   assertion_FwdState.cc_serverConnection_eq_conn-trunk-t4-squid4.patch
+
+Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Requires: bash >= 2.0
+Requires(pre): shadow-utils
+Requires(preun): systemd
+Requires(postun): systemd
+Requires: systemd-units
+Requires: libtool-ltdl
+Requires: libecap
+BuildRequires:  systemd-units
+# squid_ldap_auth and other LDAP helpers require OpenLDAP
+BuildRequires: openldap-devel
+# squid_pam_auth requires PAM development libs
+BuildRequires: pam-devel
+# SSL support requires OpenSSL
+BuildRequires: openssl-devel
+# squid_kerb_aut requires Kerberos development libs
+BuildRequires: krb5-devel
+# squid_session_auth requires DB4
+## BuildRequires: db4-devel
+# ESI support requires Expat & libxml2
+BuildRequires: expat-devel libxml2-devel
+# TPROXY requires libcap, and also increases security somewhat
+BuildRequires: libcap-devel
+# eCAP and some other need libltdl
+BuildRequires: libtool libtool-ltdl-devel
+# eCAP 1.0.0
+BuildRequires: libecap-devel libecap
+# Required to allow debug package auto creation
+BuildRequires: redhat-rpm-config
+# Required by couple external acl helpers
+BuildRequires: libdb-devel
+# Required for specific features
+BuildRequires: libnetfilter_conntrack-devel
+# Adding for future build use
+BuildRequires: gnutls-devel
+
+# Required to validate auto requires AutoReqProv: no
+## aaaAutoReqProv: no
+
+%description
+Squid is a high-performance proxy caching server for Web clients,
+supporting FTP, gopher, and HTTP data objects. Unlike traditional
+caching software, Squid handles all requests in a single,
+non-blocking, I/O-driven process. Squid keeps meta data and especially
+hot objects cached in RAM, caches DNS lookups, supports non-blocking
+DNS lookups, and implements negative caching of failed requests.
+
+Squid consists of a main server program squid, a Domain Name System
+lookup program (dnsserver), a program for retrieving FTP data
+(ftpget), and some management and client tools.
+
+%prep
+%setup -q
+%patch0
+%patch1
+#%patch2
+
+%package helpers
+Group: System Environment/Daemons
+Summary: Squid helpers
+Requires: %{name} = %{epoch}:%{version}-%{release}
+
+%description helpers
+The squid-helpers contains the external helpers.
+
+%build
+#was added due to new squid features that will be added soon
+export CXXFLAGS="$RPM_OPT_FLAGS -fPIC"
+export PERL=/usr/bin/perl
+
+%configure \
+  --exec_prefix=/usr \
+  --libexecdir=%{_libdir}/squid \
+  --localstatedir=/var \
+  --datadir=%{_datadir}/squid \
+  --sysconfdir=%{_sysconfdir}/squid \
+  --with-logdir='$(localstatedir)/log/squid' \
+  --with-pidfile='$(localstatedir)/run/squid.pid' \
+  --disable-dependency-tracking \
+  --enable-follow-x-forwarded-for \
+  --enable-auth \
+  --enable-auth-basic="DB,LDAP,NCSA,NIS,PAM,POP3,RADIUS,SASL,SMB,getpwnam,fake" \
+  --enable-auth-ntlm="fake" \
+  --enable-auth-digest="file,LDAP,eDirectory" \
+  --enable-auth-negotiate="kerberos,wrapper" \
+  --enable-external-acl-helpers="wbinfo_group,kerberos_ldap_group,LDAP_group,delayer,file_userip,SQL_session,unix_group,session,time_quota" \
+  --enable-cache-digests \
+  --enable-cachemgr-hostname=localhost \
+  --enable-delay-pools \
+  --enable-epoll \
+  --enable-icap-client \
+  --enable-ident-lookups \
+  %ifnarch ppc64 ia64 x86_64 s390x
+  --with-large-files \
+  %endif
+  --enable-linux-netfilter \
+  --enable-removal-policies="heap,lru" \
+  --enable-snmp \
+  --enable-storeio="aufs,diskd,ufs,rock" \
+  --enable-wccpv2 \
+  --enable-esi \
+  --enable-security-cert-generators  \
+  --enable-security-cert-validators \
+  --enable-icmp \
+  --with-aio \
+  --with-default-user="squid" \
+  --with-filedescriptors=16384 \
+  --with-dl \
+  --with-openssl \
+  --enable-ssl-crtd \
+  --with-pthreads \
+  --with-included-ltdl \
+  --disable-arch-native \
+  --enable-ecap \
+  --without-nettle
+
+make \
+	DEFAULT_SWAP_DIR='$(localstatedir)/spool/squid' \
+	%{?_smp_mflags}
+
+#%install
+%if %{?fedora}00%{?rhel} < 6
+sed -i 's|password-auth|system-auth|' %{SOURCE5}
+%endif
+rm -rf $RPM_BUILD_ROOT
+make \
+	DESTDIR=$RPM_BUILD_ROOT \
+	install
+mkdir -p ${RPM_BUILD_ROOT}%{_unitdir}
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/pam.d
+mkdir -p $RPM_BUILD_ROOT/usr/libexec/squid
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d
+install -m 644 %{SOURCE3} $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/squid
+install -m 644 %{SOURCE4} $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/squid
+install -m 644 %{SOURCE5} $RPM_BUILD_ROOT%{_sysconfdir}/pam.d/squid
+install -m 644 %{SOURCE6} $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+install -m 644 %{SOURCE2} $RPM_BUILD_ROOT%{_unitdir}/squid.service
+install -m 755 %{SOURCE7} $RPM_BUILD_ROOT%{_sbindir}/squidshut.sh
+
+mkdir -p $RPM_BUILD_ROOT/var/log/squid
+mkdir -p $RPM_BUILD_ROOT/var/spool/squid
+#chmod 644 contrib/url-normalizer.pl contrib/rredir.* contrib/user-agents.pl
+iconv -f ISO88591 -t UTF8 ChangeLog -o ChangeLog.tmp
+mv -f ChangeLog.tmp ChangeLog
+
+# Move the MIB definition to the proper place (and name)
+mkdir -p $RPM_BUILD_ROOT/usr/share/snmp/mibs
+mv $RPM_BUILD_ROOT/usr/share/squid/mib.txt $RPM_BUILD_ROOT/usr/share/snmp/mibs/SQUID-MIB.txt
+
+# squid.conf.documented is documentation. We ship that in doc/
+rm -f $RPM_BUILD_ROOT%{_sysconfdir}/squid/squid.conf.documented
+
+# remove unpackaged files from the buildroot
+rm -f $RPM_BUILD_ROOT%{_bindir}/{RunAccel,RunCache}
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%doc COPYING README CREDITS ChangeLog QUICKSTART src/squid.conf.documented
+#%doc contrib/url-normalizer.pl contrib/rredir.* contrib/user-agents.pl
+
+%attr(755,root,root) %dir %{_sysconfdir}/squid
+%attr(755,root,root) %dir %{_libdir}/squid
+%attr(750,squid,squid) %dir /var/log/squid
+%attr(750,squid,squid) %dir /var/spool/squid
+
+%config(noreplace) %attr(640,root,squid) %{_sysconfdir}/squid/squid.conf
+%config(noreplace) %attr(644,root,squid) %{_sysconfdir}/squid/cachemgr.conf
+%config(noreplace) %{_sysconfdir}/squid/mime.conf
+%config(noreplace) %{_sysconfdir}/squid/errorpage.css
+%config(noreplace) %{_sysconfdir}/sysconfig/squid
+# These are not noreplace because they are just sample config files
+%config %{_sysconfdir}/squid/squid.conf.default
+%config %{_sysconfdir}/squid/mime.conf.default
+%config %{_sysconfdir}/squid/errorpage.css.default
+%config %{_sysconfdir}/squid/cachemgr.conf.default
+%config(noreplace) %{_sysconfdir}/pam.d/squid
+%config(noreplace) %{_sysconfdir}/logrotate.d/squid
+
+%dir %{_datadir}/squid
+%attr(-,root,root) %{_datadir}/squid/errors
+%attr(755,root,root) %{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+%attr(755,root,root) %{_sbindir}/squidshut.sh
+%{_datadir}/squid/icons
+%{_sbindir}/squid
+%{_bindir}/squidclient
+%{_bindir}/purge
+%{_mandir}/man8/*
+%{_mandir}/man1/*
+%{_libdir}/squid/diskd
+%{_libdir}/squid/log_file_daemon
+%{_libdir}/squid/unlinkd
+%attr(4755,root,root) %{_libdir}/squid/pinger
+
+%{_datadir}/snmp/mibs/SQUID-MIB.txt
+%{_unitdir}/squid.service
+
+%files helpers
+%{_libdir}/squid/basic_db_auth
+%{_libdir}/squid/basic_getpwnam_auth
+%{_libdir}/squid/basic_ldap_auth
+%{_libdir}/squid/basic_ncsa_auth
+%{_libdir}/squid/basic_nis_auth
+%{_libdir}/squid/basic_pam_auth
+%{_libdir}/squid/basic_pop3_auth
+%{_libdir}/squid/basic_radius_auth
+%{_libdir}/squid/basic_sasl_auth
+%{_libdir}/squid/basic_smb_auth
+%{_libdir}/squid/basic_smb_auth.sh
+%{_libdir}/squid/basic_fake_auth
+%{_libdir}/squid/cachemgr.cgi
+%{_libdir}/squid/cert_tool
+#%{_libdir}/squid/cert_valid.pl
+%{_libdir}/squid/digest_file_auth
+%{_libdir}/squid/digest_ldap_auth
+%{_libdir}/squid/digest_edirectory_auth
+%{_libdir}/squid/ext_kerberos_ldap_group_acl
+%{_libdir}/squid/ext_wbinfo_group_acl
+%{_libdir}/squid/helper-mux
+%{_libdir}/squid/url_lfs_rewrite
+%{_libdir}/squid/log_db_daemon
+%{_libdir}/squid/negotiate_kerberos_auth
+%{_libdir}/squid/negotiate_kerberos_auth_test
+%{_libdir}/squid/negotiate_wrapper_auth
+%{_libdir}/squid/ntlm_fake_auth
+#%{_libdir}/squid/ntlm_smb_lm_auth
+#%{_libdir}/squid/ssl_crtd
+%{_libdir}/squid/storeid_file_rewrite
+%{_libdir}/squid/url_fake_rewrite
+%{_libdir}/squid/url_fake_rewrite.sh
+%{_libdir}/squid/ext_delayer_acl
+%{_libdir}/squid/ext_file_userip_acl
+%{_libdir}/squid/ext_ldap_group_acl
+%{_libdir}/squid/ext_session_acl
+%{_libdir}/squid/ext_sql_session_acl
+%{_libdir}/squid/ext_time_quota_acl
+%{_libdir}/squid/ext_unix_group_acl
+
+%{_libdir}/squid/helper-mux
+%{_libdir}/squid/security_fake_certverify
+%{_libdir}/squid/security_file_certgen
+%{_libdir}/squid/url_lfs_rewrite
+
+#error: File not found: /home/rpm/rpmbuild/BUILDROOT/squid-4.0.5-1.el7.centos.x86_64/usr/lib64/squid/cert_valid.pl
+#error: File not found: /home/rpm/rpmbuild/BUILDROOT/squid-4.0.5-1.el7.centos.x86_64/usr/lib64/squid/ntlm_smb_lm_auth
+#error: File not found: /home/rpm/rpmbuild/BUILDROOT/squid-4.0.5-1.el7.centos.x86_64/usr/lib64/squid/ssl_crtd
+
+
+%pre
+if ! getent group squid >/dev/null 2>&1; then
+  /usr/sbin/groupadd -g 23 squid
+fi
+
+if ! getent passwd squid >/dev/null 2>&1 ; then
+  /usr/sbin/useradd -g 23 -u 23 -d /var/spool/squid -r -s /sbin/nologin squid >/dev/null 2>&1 || exit 1 
+fi
+
+for i in /var/log/squid /var/spool/squid ; do
+        if [ -d $i ] ; then
+                for adir in `find $i -maxdepth 0 \! -user squid`; do
+                        chown -R squid:squid $adir
+                done
+        fi
+done
+
+exit 0
+
+%post
+echo "squid.conf.documented is at /usr/share/squid-%{version}/squid.conf.documented"
+%systemd_post squid.service
+
+%preun
+%systemd_preun squid.service
+
+%postun
+%systemd_postun_with_restart squid.service
+
+%postun helpers
+%triggerin -- samba-common
+if ! getent group wbpriv >/dev/null 2>&1 ; then
+  /usr/sbin/groupadd -g 88 wbpriv >/dev/null 2>&1 || :
+fi
+/usr/sbin/usermod -a -G wbpriv squid >/dev/null 2>&1 || \
+    chgrp squid /var/lib/samba/winbindd_privileged >/dev/null 2>&1 || :
+    chmod 750 /var/lib/samba/winbindd_privileged  >/dev/null 2>&1 || :
+
+%changelog
+* Mon Mar 07 2016 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.7-2 Beta, This is a testing version.
++ Patched with fix for bug 4447.
+
* Wed Feb 24 2016 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.7-1 Beta, This is a testing version.
+
+* Tue Feb 16 2016 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.5-1 Beta, This is a testing version.
+
+* Sun Feb 14 2016 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.4-3 Beta, This is a testing version.
+- I added the squidshut.sh script which was missing for an unknown reason.
+
+* Sun Jan 10 2016 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.4 Beta, this is a testing version!!
+- Added a patch to the ICAP service that allows using only an OPTIONS
+  fetch failure to change the service state from UP to DOWN(suspended).
+
+* Wed Dec 09 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.3-1 Beta, this version has some bugs!
+- If you can help to debug and\or help patch or sponsor a fix it will help a lot! 
+
+* Mon Oct 26 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.1-4 Beta, daily tar.gz 26-10-2015
+
+* Mon Oct 19 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.1-2 Beta.
+- Release 4.0.1-3 Beta with libecap support.
+
+* Thu Oct 15 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0.1 Beta.
+
+* Fri Aug 21 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 4.0. Beta.
+- Added /url_lfs_rewrite
+- Changed helper-mux.pl to helper-mux
+- Release 2 includes systemd service instead of a init script. 
+- Release 3 a fix in the requirements of squid helpers.
+
+* Mon Aug 17 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.7 Stable.
+- Default disabling pinger due to selinux issues.
+
+* Tue Jul 07 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.6 Stable.
+- Adding edirectory digest helper back.
+
+* Thu May 28 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.4 Stable.
+
+* Fri May 22 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.4 Stable.
+
+* Fri Apr 24 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.3 Stable.
+- Removed eDirectory helper.
+- Removed AD_Group helper.
+
+* Wed Mar 04 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.2 Stable.
+
+* Mon Jan 12 2015 Eliezer Croitoru <eliezer@ngtech.co.il>
+- Release 3.5.0.4 Beta.
+- COPYRIGHT content was moved into README.

+ 21 - 0
check_release.pl

@@ -0,0 +1,21 @@
+#!/usr/bin/env perl
+
+use LWP::Simple;                # From CPAN
+use JSON qw( decode_json );     # From CPAN
+use Data::Dumper;               # Perl core module
+use strict;                     # Good practice
+use warnings;                   # Good practice
+
+my $versionsurl = "http://ngtech.co.il/ruby/squid/versions.cgi";
+
+my $json = get( $versionsurl );
+die "Could not get $versionsurl!" unless defined $json;
+
+# Decode the entire JSON
+my $decoded_json = decode_json( $json );
+
+# you'll get this (it'll print out); comment this when done.
+#print Dumper $decoded_json;
+
+# Access the shares like this:
+print $decoded_json->{$ARGV[0]},"\n";

+ 12 - 0
check_release.rb

@@ -0,0 +1,12 @@
+#!/usr/bin/env ruby
+
+require 'net/http'
+require "open-uri"
+require "json"
+
+apiurl = "http://ngtech.co.il/ruby/squid/versions.cgi"
+uri = URI(apiurl)
+response = Net::HTTP.get(uri)
+versions = JSON.parse(response)
+
+puts versions[ARGV[0]]

+ 89 - 0
config.xml

@@ -0,0 +1,89 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<project>
+  <actions/>
+  <description></description>
+  <keepDependencies>false</keepDependencies>
+  <properties>
+    <hudson.model.ParametersDefinitionProperty>
+      <parameterDefinitions>
+        <hudson.model.StringParameterDefinition>
+          <name>RELEASE_NUMBER</name>
+          <description>This is the RPM release number</description>
+          <defaultValue>1</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SOURCES_VERSION</name>
+          <description>This is the software version to be downloaded from the sources site.</description>
+          <defaultValue>4.0.7</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>DEPLOY_RPMS</name>
+          <description>Instructs the build scripts to deploy the RPMS on  the repository service.</description>
+          <defaultValue>false</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SMP_FLAG</name>
+          <description></description>
+          <defaultValue>5</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>OVERRIDE_VERSION_TO_LATEST</name>
+          <description>The buildrpms target will use an automated macro to choose the latest sources release based on the JSON API in the branch.</description>
+          <defaultValue>true</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SPEC_FILE</name>
+          <description>The spec file to use in order to build the RPMs.</description>
+          <defaultValue>squid4beta.spec</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>VERSION_BRANCH</name>
+          <description>Sources branch</description>
+          <defaultValue>4.0</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>ENABLE_CCACHE</name>
+          <description>Use ccache in this build (sets USE_CCACHE=1 or 0)</description>
+          <defaultValue>true</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>ALTERNATE_PATH</name>
+          <description>A full alternative PATH to the environment</description>
+          <defaultValue>/usr/lib64/ccache:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>OVERRIDE_PATH</name>
+          <description>Turn on and off ALTERNATE_PATH usage.</description>
+          <defaultValue>false</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>CCACHE_LINKS_PATH</name>
+          <description>g++\gcc\cc\c++ ccache links location</description>
+          <defaultValue>/usr/lib64/ccache</defaultValue>
+        </hudson.model.StringParameterDefinition>
+      </parameterDefinitions>
+    </hudson.model.ParametersDefinitionProperty>
+  </properties>
+  <scm class="hudson.scm.NullSCM"/>
+  <canRoam>true</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers/>
+  <concurrentBuild>false</concurrentBuild>
+  <builders>
+    <hudson.tasks.Shell>
+      <command>#make gitfetch
+rm -rf clone
+rm -rf build
+/usr/bin/git clone http://gogs.ngtech.co.il/elicro/squid-centos7-squid4-rpms clone
+mv clone build
+cd build
+
+make crlfclean
+make buildrpms
+make deployrpms</command>
+    </hudson.tasks.Shell>
+  </builders>
+  <publishers/>
+  <buildWrappers/>

+ 90 - 0
config.xml-fedora

@@ -0,0 +1,90 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<project>
+  <actions/>
+  <description></description>
+  <keepDependencies>false</keepDependencies>
+  <properties>
+    <hudson.model.ParametersDefinitionProperty>
+      <parameterDefinitions>
+     <hudson.model.StringParameterDefinition>
+          <name>RELEASE_NUMBER</name>
+          <description>This is the RPM release number</description>
+          <defaultValue>1</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SOURCES_VERSION</name>
+          <description>This is the software version to be downloaded from the sources site.</description>
+          <defaultValue>4.0.7</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>DEPLOY_RPMS</name>
+          <description>Instructs the build scripts to deploy the RPMS on  the repository service.</description>
+          <defaultValue>false</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SMP_FLAG</name>
+          <description></description>
+          <defaultValue>5</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>OVERRIDE_VERSION_TO_LATEST</name>
+          <description>The buildrpms target will use an automated macro to choose the latest sources release based on the JSON API in the branch.</description>
+          <defaultValue>true</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>SPEC_FILE</name>
+          <description>The spec file to use in order to build the RPMs.</description>
+          <defaultValue>squid4beta.spec</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>VERSION_BRANCH</name>
+          <description>Sources branch</description>
+          <defaultValue>4.0</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>ENABLE_CCACHE</name>
+          <description>Use ccache in this build (sets USE_CCACHE=1 or 0)</description>
+          <defaultValue>true</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>ALTERNATE_PATH</name>
+          <description>A full alternative PATH to the environment</description>
+          <defaultValue>/usr/lib64/ccache:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin</defaultValue>
+        </hudson.model.StringParameterDefinition>
+        <hudson.model.BooleanParameterDefinition>
+          <name>OVERRIDE_PATH</name>
+          <description>Turn on and off ALTERNATE_PATH usage.</description>
+          <defaultValue>false</defaultValue>
+        </hudson.model.BooleanParameterDefinition>
+        <hudson.model.StringParameterDefinition>
+          <name>CCACHE_LINKS_PATH</name>
+          <description>g++\gcc\cc\c++ ccache links location</description>
+          <defaultValue>/usr/lib64/ccache</defaultValue>
+        </hudson.model.StringParameterDefinition>
+      </parameterDefinitions>
+    </hudson.model.ParametersDefinitionProperty>
+  </properties>
+  <scm class="hudson.scm.NullSCM"/>
+  <canRoam>true</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers/>
+  <concurrentBuild>false</concurrentBuild>
+  <builders>
+    <hudson.tasks.Shell>
+      <command>rm -rf clone
+rm -rf build
+/usr/bin/git clone http://gitlab.ngtech.local/elico/squid-centos7-squid4-rpms.git clone
+mv clone build
+cd build
+
+make crlfclean
+make buildrpms
+make deployrpms
+</command>
+    </hudson.tasks.Shell>
+  </builders>
+  <publishers/>
+  <buildWrappers/>
+</project>

+ 15 - 0
deploy.sh

@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+case $DEPLOY_RPMS in
+	false)
+#	echo $DEPLOY_RPMS
+	echo "Deploying the RPMS is not required by the build!"
+
+	;;
+	true)
+#	echo $DEPLOY_RPMS
+	echo "Starting RPMS deployment!"
+
+	;;
+	*)
+	exit 1
+esac

+ 139 - 0
run.sh

@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+
+## Validate variables
+# $SOURCES_VERSION
+# $RELEASE_NUMBER
+# $SPEC_FILE
+# $OVERRIDE_VERSION_TO_LATEST
+# $CCACHE_LINKS_PATH
+# $OVERRIDE_PATH
+# $ALTERNATE_PATH
+
+#PATH=$CCACHE_PATH:$PATH
+#export PATH
+
+echo "Printing original environmental variables:"
+env
+echo "End of variables"
+
+case $ENABLE_CCACHE in
+        false)
+		echo "disabling ccache usage"
+		USE_CCACHE=0
+        ;;
+        *)
+		echo "using default ccache usage"
+#		USE_CCACHE=1
+esac
+
+echo -n "USE_CCACHE="
+echo $USE_CCACHE
+
+case $CLEAR_CCACHE in
+        true)
+		echo "Clearing ccache data"
+		/usr/bin/ccache -C
+        ;;
+        *)
+		echo "ccache was not cleared"
+esac
+
+echo -n "Environment PATH => "
+echo $PATH
+
+export PATH=$CCACHE_LINKS_PATH:$PATH
+echo -n "Modified Environment PATH => "
+echo $PATH
+
+gcc -v 
+
+case $OVERRIDE_PATH in
+        true)
+		echo "Using the build parameters PATH"
+		PATH=$ALTERNATE_PATH
+		export PATH
+		echo -n "Modified Environment PATH => "
+		echo $PATH
+		gcc -v 
+        ;;
+        *)
+        	echo "Using default PATH"
+esac
+
+
+# Clean up and create directories
+for dir in BUILD RPMS SRPMS
+do
+ [[ -d $dir ]] && rm -Rf $dir
+  mkdir $dir
+done
+
+for dir in SOURCES SPECS
+do
+  mkdir $dir
+done
+
+VERSION_CHECKED="false"
+case $OVERRIDE_VERSION_TO_LATEST in
+        true)
+		# CHECK the latest version and set $SOURCES_VERSION
+		echo "Checking for latest sources release"
+		if [ -x "/usr/bin/ruby" ]
+	       	then
+			SOURCES_VERSION=`/usr/bin/ruby check_release.rb $VERSION_BRANCH`
+			VERSION_CHECKED="true"
+		fi
+
+		if [ -x "/usr/bin/perl" ] && [ "$VERSION_CHECKED" =  "false" ]
+	       	then
+			SOURCES_VERSION=`/usr/bin/perl check_release.pl $VERSION_BRANCH`
+			VERSION_CHECKED="true"
+		fi
+        ;;
+        *)
+        	echo "Using sources for release from the build parameters"
+		VERSION_CHECKED="true"
+esac
+
+case $VERSION_CHECKED in
+	false)
+		echo "couldn't check the latest sources version"
+		exit 1
+	;;
+	*)
+		echo "sources version was checked against the API or was manually configured"
+esac
+
+echo -n "Sources version for the build: "
+echo $SOURCES_VERSION
+
+
+
+## fetch current stable version from a url
+## Download squid sources(tar.xz+asc) if doesn't exists
+if [ -e "SOURCES/squid-$SOURCES_VERSION.tar.xz" ]
+then
+  echo "tar.xz already exists"
+else
+  wget -O "SOURCES/squid-$SOURCES_VERSION.tar.xz" "http://ngtech.co.il/squid/src/squid-$SOURCES_VERSION.tar.xz"
+fi
+
+if [ -e "SOURCES/squid-$SOURCES_VERSION.tar.xz.asc" ]
+then
+  echo "tar.xz.asc already exists"
+else
+  wget -O "SOURCES/squid-$SOURCES_VERSION.tar.xz.asc" "http://ngtech.co.il/squid/src/squid-$SOURCES_VERSION.tar.xz.asc"
+fi
+
+# Create rpm in RPMS/noarch/
+#rpmbuild --define '_topdir '`pwd` --define '_smp_mflags -j5' -ba SPECS/helloworld.spec
+echo -n "SMP FLAGS "
+echo $SMP_FLAG
+echo -n "Will be using the SPEC file for the build: "
+echo $SPEC_FILE
+
+echo "Printing changed environmental variables:"
+env
+echo "End of variables"
+
+rpmbuild --define '_topdir '`pwd` --define "_smp_mflags -j$SMP_FLAG" -ba -vv SPECS/$SPEC_FILE