Mirror of https://github.com/WhatCD/Ocelot.git, synced 2026-01-16 19:05:03 -05:00
Ocelot 0.7
13 CHANGES
@@ -1,3 +1,16 @@
-- 0.7 (2013-12-05)
Add bintohex function to use when printing binary data to stdout
Check if compiler supports -std=c++11
Collect some stats and add a method that prints them
Create a function for adding peers
Fix a potential problem where a deleted pointer was being accessed
Improve the anti-web crawler code
Make use of the peer iterator for faster deletions
Remove most of the dependencies on boost and move all thread-related calls to the STL components provided in C++11
Run the reaper routines in a single thread to clean up stdout
Separate the sections responsible for output to make it easier to change http headers if needed
Update the db's torrents table when the peer reaper deletes the last peer on a torrent

-- 0.6 (2013-03-19)
Add --with-tcmalloc option to the configure script to enable Google's high-performance multi-threaded malloc() implementation
Configurable site path
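The 0.7 entry above mentions a new bintohex helper for printing binary data (info hashes, peer IDs) to stdout. The function itself is not part of the hunks shown on this page, so the following is only a minimal sketch of such a helper; the name matches the changelog, but the exact signature used in Ocelot is an assumption.

#include <string>

// Hex-encode a binary string so it can be printed safely to stdout.
// Sketch only; Ocelot's real helper may differ in signature and placement.
static std::string bintohex(const std::string &in) {
	static const char hex[] = "0123456789abcdef";
	std::string out;
	out.reserve(in.size() * 2);
	for (unsigned char c : in) {
		out.push_back(hex[c >> 4]);
		out.push_back(hex[c & 0x0f]);
	}
	return out;
}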
15 Makefile.am
@@ -1,12 +1,15 @@
ACLOCAL_AMFLAGS = -I m4

bin_PROGRAMS = ocelot
ocelot_SOURCES = config.h db.cpp db.h events.cpp events.h \
	misc_functions.cpp misc_functions.h ocelot.cpp ocelot.h \
	schedule.cpp schedule.h site_comm.cpp site_comm.h worker.cpp worker.h
ocelot_SOURCES = config.h db.cpp db.h events.cpp events.h misc_functions.cpp misc_functions.h\
	ocelot.cpp ocelot.h report.cpp report.h response.cpp response.h \
	schedule.cpp schedule.h site_comm.cpp site_comm.h user.cpp user.h worker.cpp worker.h
nodist_ocelot_SOURCES = config.cpp

AM_CPPFLAGS = -march=native -O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fno-ident -pthread $(BOOST_CPPFLAGS) -I$(MYSQLPP_INC_DIR) -I$(EV_INCLUDE_DIR)
ocelot_LDFLAGS = -pthread $(BOOST_LDFLAGS) -L$(EV_LIB_DIR) -L$(MYSQLPP_LIB_DIR)
ocelot_LDADD = $(BOOST_IOSTREAMS_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) -lev -lmysqlpp
AM_CPPFLAGS = -std=c++11 -march=native -O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fno-ident -pthread -Wall -Wfatal-errors -Wl,O1 -Wl,--as-needed $(BOOST_CPPFLAGS)
ocelot_LDFLAGS = -pthread $(BOOST_LDFLAGS)
ocelot_LDADD = $(BOOST_IOSTREAMS_LIB) $(BOOST_SYSTEM_LIB) -lev -lmysqlpp
EXTRA_DIST = CHANGES LICENSE config.cpp.template
dist-hook:
	touch ${distdir}/configure
	patch -p2 -d ${distdir} < ../dist.patch
128 Makefile.in
@@ -1,7 +1,7 @@
|
||||
# Makefile.in generated by automake 1.13.1 from Makefile.am.
|
||||
# Makefile.in generated by automake 1.14 from Makefile.am.
|
||||
# @configure_input@
|
||||
|
||||
# Copyright (C) 1994-2012 Free Software Foundation, Inc.
|
||||
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
|
||||
|
||||
# This Makefile.in is free software; the Free Software Foundation
|
||||
# gives unlimited permission to copy and/or distribute it,
|
||||
@@ -15,23 +15,51 @@
|
||||
@SET_MAKE@
|
||||
|
||||
VPATH = @srcdir@
|
||||
am__make_dryrun = \
|
||||
{ \
|
||||
am__dry=no; \
|
||||
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
|
||||
am__make_running_with_option = \
|
||||
case $${target_option-} in \
|
||||
?) ;; \
|
||||
*) echo "am__make_running_with_option: internal error: invalid" \
|
||||
"target option '$${target_option-}' specified" >&2; \
|
||||
exit 1;; \
|
||||
esac; \
|
||||
has_opt=no; \
|
||||
sane_makeflags=$$MAKEFLAGS; \
|
||||
if $(am__is_gnu_make); then \
|
||||
sane_makeflags=$$MFLAGS; \
|
||||
else \
|
||||
case $$MAKEFLAGS in \
|
||||
*\\[\ \ ]*) \
|
||||
echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
|
||||
| grep '^AM OK$$' >/dev/null || am__dry=yes;; \
|
||||
*) \
|
||||
for am__flg in $$MAKEFLAGS; do \
|
||||
case $$am__flg in \
|
||||
*=*|--*) ;; \
|
||||
*n*) am__dry=yes; break;; \
|
||||
esac; \
|
||||
done;; \
|
||||
bs=\\; \
|
||||
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
|
||||
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
|
||||
esac; \
|
||||
test $$am__dry = yes; \
|
||||
}
|
||||
fi; \
|
||||
skip_next=no; \
|
||||
strip_trailopt () \
|
||||
{ \
|
||||
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
|
||||
}; \
|
||||
for flg in $$sane_makeflags; do \
|
||||
test $$skip_next = yes && { skip_next=no; continue; }; \
|
||||
case $$flg in \
|
||||
*=*|--*) continue;; \
|
||||
-*I) strip_trailopt 'I'; skip_next=yes;; \
|
||||
-*I?*) strip_trailopt 'I';; \
|
||||
-*O) strip_trailopt 'O'; skip_next=yes;; \
|
||||
-*O?*) strip_trailopt 'O';; \
|
||||
-*l) strip_trailopt 'l'; skip_next=yes;; \
|
||||
-*l?*) strip_trailopt 'l';; \
|
||||
-[dEDm]) skip_next=yes;; \
|
||||
-[JT]) skip_next=yes;; \
|
||||
esac; \
|
||||
case $$flg in \
|
||||
*$$target_option*) has_opt=yes; break;; \
|
||||
esac; \
|
||||
done; \
|
||||
test $$has_opt = yes
|
||||
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
|
||||
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
|
||||
pkgdatadir = $(datadir)/@PACKAGE@
|
||||
pkgincludedir = $(includedir)/@PACKAGE@
|
||||
pkglibdir = $(libdir)/@PACKAGE@
|
||||
@@ -52,15 +80,16 @@ build_triplet = @build@
|
||||
bin_PROGRAMS = ocelot$(EXEEXT)
|
||||
subdir = .
|
||||
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
|
||||
$(top_srcdir)/configure $(am__configure_deps) depcomp \
|
||||
$(top_srcdir)/configure $(am__configure_deps) depcomp compile \
|
||||
config.guess config.sub install-sh missing
|
||||
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
|
||||
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_boost_base.m4 \
|
||||
$(top_srcdir)/m4/ax_boost_iostreams.m4 \
|
||||
$(top_srcdir)/m4/ax_boost_system.m4 \
|
||||
$(top_srcdir)/m4/ax_boost_thread.m4 $(top_srcdir)/m4/ev++.m4 \
|
||||
$(top_srcdir)/m4/mysql++.m4 $(top_srcdir)/m4/mysql_loc.m4 \
|
||||
$(top_srcdir)/m4/tcmalloc.m4 $(top_srcdir)/configure.ac
|
||||
$(top_srcdir)/m4/ax_check_compile_flag.m4 \
|
||||
$(top_srcdir)/m4/ev++.m4 $(top_srcdir)/m4/mysql++.m4 \
|
||||
$(top_srcdir)/m4/mysql_loc.m4 $(top_srcdir)/m4/tcmalloc.m4 \
|
||||
$(top_srcdir)/configure.ac
|
||||
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
|
||||
$(ACLOCAL_M4)
|
||||
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
|
||||
@@ -71,13 +100,14 @@ CONFIG_CLEAN_VPATH_FILES =
|
||||
am__installdirs = "$(DESTDIR)$(bindir)"
|
||||
PROGRAMS = $(bin_PROGRAMS)
|
||||
am_ocelot_OBJECTS = db.$(OBJEXT) events.$(OBJEXT) \
|
||||
misc_functions.$(OBJEXT) ocelot.$(OBJEXT) schedule.$(OBJEXT) \
|
||||
site_comm.$(OBJEXT) worker.$(OBJEXT)
|
||||
misc_functions.$(OBJEXT) ocelot.$(OBJEXT) report.$(OBJEXT) \
|
||||
response.$(OBJEXT) schedule.$(OBJEXT) site_comm.$(OBJEXT) \
|
||||
user.$(OBJEXT) worker.$(OBJEXT)
|
||||
nodist_ocelot_OBJECTS = config.$(OBJEXT)
|
||||
ocelot_OBJECTS = $(am_ocelot_OBJECTS) $(nodist_ocelot_OBJECTS)
|
||||
am__DEPENDENCIES_1 =
|
||||
ocelot_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
|
||||
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
|
||||
$(am__DEPENDENCIES_1)
|
||||
AM_V_P = $(am__v_P_@AM_V@)
|
||||
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
|
||||
am__v_P_0 = false
|
||||
@@ -175,7 +205,6 @@ BOOST_CPPFLAGS = @BOOST_CPPFLAGS@
|
||||
BOOST_IOSTREAMS_LIB = @BOOST_IOSTREAMS_LIB@
|
||||
BOOST_LDFLAGS = @BOOST_LDFLAGS@
|
||||
BOOST_SYSTEM_LIB = @BOOST_SYSTEM_LIB@
|
||||
BOOST_THREAD_LIB = @BOOST_THREAD_LIB@
|
||||
CC = @CC@
|
||||
CCDEPMODE = @CCDEPMODE@
|
||||
CFLAGS = @CFLAGS@
|
||||
@@ -272,13 +301,13 @@ top_build_prefix = @top_build_prefix@
|
||||
top_builddir = @top_builddir@
|
||||
top_srcdir = @top_srcdir@
|
||||
ACLOCAL_AMFLAGS = -I m4
|
||||
ocelot_SOURCES = config.h db.cpp db.h events.cpp events.h \
|
||||
misc_functions.cpp misc_functions.h ocelot.cpp ocelot.h \
|
||||
schedule.cpp schedule.h site_comm.cpp site_comm.h worker.cpp worker.h
|
||||
ocelot_SOURCES = config.h db.cpp db.h events.cpp events.h misc_functions.cpp misc_functions.h\
|
||||
ocelot.cpp ocelot.h report.cpp report.h response.cpp response.h \
|
||||
schedule.cpp schedule.h site_comm.cpp site_comm.h user.cpp user.h worker.cpp worker.h
|
||||
|
||||
nodist_ocelot_SOURCES = config.cpp
|
||||
AM_CPPFLAGS = -march=native -O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fno-ident $(BOOST_CPPFLAGS) -I$(MYSQLPP_INC_DIR) -I$(EV_INCLUDE_DIR)
|
||||
ocelot_LDADD = $(BOOST_LDFLAGS) $(BOOST_IOSTREAMS_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) -L$(EV_LIB_DIR) -L$(MYSQLPP_LIB_DIR) -lev -lmysqlpp
|
||||
AM_CPPFLAGS = -std=c++11 -march=native -O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fno-ident -pthread -Wall -Wfatal-errors -Wl,O1 -Wl,--as-needed $(BOOST_CPPFLAGS) -I$(MYSQLPP_INC_DIR) -I$(EV_INC_DIR) -I$(MYSQL_C_INC_DIR)
|
||||
ocelot_LDADD = $(BOOST_LDFLAGS) $(BOOST_IOSTREAMS_LIB) $(BOOST_SYSTEM_LIB) -L$(EV_LIB_DIR) -L$(MYSQLPP_LIB_DIR) -lev -lmysqlpp
|
||||
EXTRA_DIST = CHANGES LICENSE config.cpp.template
|
||||
all: all-am
|
||||
|
||||
@@ -360,6 +389,7 @@ uninstall-binPROGRAMS:
|
||||
|
||||
clean-binPROGRAMS:
|
||||
-test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
|
||||
|
||||
ocelot$(EXEEXT): $(ocelot_OBJECTS) $(ocelot_DEPENDENCIES) $(EXTRA_ocelot_DEPENDENCIES)
|
||||
@rm -f ocelot$(EXEEXT)
|
||||
$(AM_V_CXXLD)$(CXXLINK) $(ocelot_OBJECTS) $(ocelot_LDADD) $(LIBS)
|
||||
@@ -375,8 +405,11 @@ distclean-compile:
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/events.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/misc_functions.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ocelot.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/report.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/response.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/schedule.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/site_comm.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user.Po@am__quote@
|
||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/worker.Po@am__quote@
|
||||
|
||||
.cpp.o:
|
||||
@@ -484,6 +517,9 @@ distdir: $(DISTFILES)
|
||||
|| exit 1; \
|
||||
fi; \
|
||||
done
|
||||
$(MAKE) $(AM_MAKEFLAGS) \
|
||||
top_distdir="$(top_distdir)" distdir="$(distdir)" \
|
||||
dist-hook
|
||||
-test -n "$(am__skip_mode_fix)" \
|
||||
|| find "$(distdir)" -type d ! -perm -755 \
|
||||
-exec chmod u+rwx,go+rx {} \; -o \
|
||||
@@ -508,10 +544,16 @@ dist-xz: distdir
|
||||
$(am__post_remove_distdir)
|
||||
|
||||
dist-tarZ: distdir
|
||||
@echo WARNING: "Support for shar distribution archives is" \
|
||||
"deprecated." >&2
|
||||
@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
|
||||
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
|
||||
$(am__post_remove_distdir)
|
||||
|
||||
dist-shar: distdir
|
||||
@echo WARNING: "Support for distribution archives compressed with" \
|
||||
"legacy program 'compress' is deprecated." >&2
|
||||
@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
|
||||
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
|
||||
$(am__post_remove_distdir)
|
||||
|
||||
@@ -722,19 +764,23 @@ uninstall-am: uninstall-binPROGRAMS
|
||||
.PHONY: CTAGS GTAGS TAGS all all-am am--refresh check check-am clean \
|
||||
clean-binPROGRAMS clean-cscope clean-generic cscope \
|
||||
cscopelist-am ctags ctags-am dist dist-all dist-bzip2 \
|
||||
dist-gzip dist-lzip dist-shar dist-tarZ dist-xz dist-zip \
|
||||
distcheck distclean distclean-compile distclean-generic \
|
||||
distclean-tags distcleancheck distdir distuninstallcheck dvi \
|
||||
dvi-am html html-am info info-am install install-am \
|
||||
install-binPROGRAMS install-data install-data-am install-dvi \
|
||||
install-dvi-am install-exec install-exec-am install-html \
|
||||
install-html-am install-info install-info-am install-man \
|
||||
install-pdf install-pdf-am install-ps install-ps-am \
|
||||
install-strip installcheck installcheck-am installdirs \
|
||||
maintainer-clean maintainer-clean-generic mostlyclean \
|
||||
mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \
|
||||
tags tags-am uninstall uninstall-am uninstall-binPROGRAMS
|
||||
dist-gzip dist-hook dist-lzip dist-shar dist-tarZ dist-xz \
|
||||
dist-zip distcheck distclean distclean-compile \
|
||||
distclean-generic distclean-tags distcleancheck distdir \
|
||||
distuninstallcheck dvi dvi-am html html-am info info-am \
|
||||
install install-am install-binPROGRAMS install-data \
|
||||
install-data-am install-dvi install-dvi-am install-exec \
|
||||
install-exec-am install-html install-html-am install-info \
|
||||
install-info-am install-man install-pdf install-pdf-am \
|
||||
install-ps install-ps-am install-strip installcheck \
|
||||
installcheck-am installdirs maintainer-clean \
|
||||
maintainer-clean-generic mostlyclean mostlyclean-compile \
|
||||
mostlyclean-generic pdf pdf-am ps ps-am tags tags-am uninstall \
|
||||
uninstall-am uninstall-binPROGRAMS
|
||||
|
||||
dist-hook:
|
||||
touch ${distdir}/configure
|
||||
patch -p2 -d ${distdir} < ../dist.patch
|
||||
|
||||
# Tell versions [3.59,3.63) of GNU make to not export all variables.
|
||||
# Otherwise a system limit (for SysV at least) may be exceeded.
|
||||
|
||||
288 aclocal.m4 (vendored)
@@ -1,6 +1,6 @@
|
||||
# generated automatically by aclocal 1.13.1 -*- Autoconf -*-
|
||||
# generated automatically by aclocal 1.14 -*- Autoconf -*-
|
||||
|
||||
# Copyright (C) 1996-2012 Free Software Foundation, Inc.
|
||||
# Copyright (C) 1996-2013 Free Software Foundation, Inc.
|
||||
|
||||
# This file is free software; the Free Software Foundation
|
||||
# gives unlimited permission to copy and/or distribute it,
|
||||
@@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.])
|
||||
# generated from the m4 files accompanying Automake X.Y.
|
||||
# (This private macro should not be called outside this file.)
|
||||
AC_DEFUN([AM_AUTOMAKE_VERSION],
|
||||
[am__api_version='1.13'
|
||||
[am__api_version='1.14'
|
||||
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
|
||||
dnl require some minimum version. Point them to the right macro.
|
||||
m4_if([$1], [1.13.1], [],
|
||||
m4_if([$1], [1.14], [],
|
||||
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
|
||||
])
|
||||
|
||||
@@ -51,7 +51,7 @@ m4_define([_AM_AUTOCONF_VERSION], [])
|
||||
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
|
||||
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
|
||||
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
|
||||
[AM_AUTOMAKE_VERSION([1.13.1])dnl
|
||||
[AM_AUTOMAKE_VERSION([1.14])dnl
|
||||
m4_ifndef([AC_AUTOCONF_VERSION],
|
||||
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
|
||||
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
|
||||
@@ -373,7 +373,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
|
||||
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
|
||||
test -z "$DEPDIR" && continue
|
||||
am__include=`sed -n 's/^am__include = //p' < "$mf"`
|
||||
test -z "am__include" && continue
|
||||
test -z "$am__include" && continue
|
||||
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
|
||||
# Find all dependency output files, they are included files with
|
||||
# $(DEPDIR) in their names. We invoke sed twice because it is the
|
||||
@@ -418,6 +418,12 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
|
||||
# This macro actually does too much. Some checks are only needed if
|
||||
# your package does certain things. But this isn't really a big deal.
|
||||
|
||||
dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
|
||||
m4_define([AC_PROG_CC],
|
||||
m4_defn([AC_PROG_CC])
|
||||
[_AM_PROG_CC_C_O
|
||||
])
|
||||
|
||||
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
|
||||
# AM_INIT_AUTOMAKE([OPTIONS])
|
||||
# -----------------------------------------------
|
||||
@@ -526,7 +532,48 @@ dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
|
||||
AC_CONFIG_COMMANDS_PRE(dnl
|
||||
[m4_provide_if([_AM_COMPILER_EXEEXT],
|
||||
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
|
||||
])
|
||||
|
||||
# POSIX will say in a future version that running "rm -f" with no argument
|
||||
# is OK; and we want to be able to make that assumption in our Makefile
|
||||
# recipes. So use an aggressive probe to check that the usage we want is
|
||||
# actually supported "in the wild" to an acceptable degree.
|
||||
# See automake bug#10828.
|
||||
# To make any issue more visible, cause the running configure to be aborted
|
||||
# by default if the 'rm' program in use doesn't match our expectations; the
|
||||
# user can still override this though.
|
||||
if rm -f && rm -fr && rm -rf; then : OK; else
|
||||
cat >&2 <<'END'
|
||||
Oops!
|
||||
|
||||
Your 'rm' program seems unable to run without file operands specified
|
||||
on the command line, even when the '-f' option is present. This is contrary
|
||||
to the behaviour of most rm programs out there, and not conforming with
|
||||
the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
|
||||
|
||||
Please tell bug-automake@gnu.org about your system, including the value
|
||||
of your $PATH and any error possibly output before this message. This
|
||||
can help us improve future automake versions.
|
||||
|
||||
END
|
||||
if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
|
||||
echo 'Configuration will proceed anyway, since you have set the' >&2
|
||||
echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
|
||||
echo >&2
|
||||
else
|
||||
cat >&2 <<'END'
|
||||
Aborting the configuration process, to ensure you take notice of the issue.
|
||||
|
||||
You can download and install GNU coreutils to get an 'rm' implementation
|
||||
that behaves properly: <http://www.gnu.org/software/coreutils/>.
|
||||
|
||||
If you want to complete the configuration process using your problematic
|
||||
'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
|
||||
to "yes", and re-run configure.
|
||||
|
||||
END
|
||||
AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
|
||||
fi
|
||||
fi])
|
||||
|
||||
dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
|
||||
dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
|
||||
@@ -534,7 +581,6 @@ dnl mangled by Autoconf and run in a shell conditional statement.
|
||||
m4_define([_AC_COMPILER_EXEEXT],
|
||||
m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
|
||||
|
||||
|
||||
# When config.status generates a header, we must update the stamp-h file.
|
||||
# This file resides in the same directory as the config header
|
||||
# that is generated. The stamp files are numbered to have different names.
|
||||
@@ -716,6 +762,70 @@ AC_DEFUN([_AM_SET_OPTIONS],
|
||||
AC_DEFUN([_AM_IF_OPTION],
|
||||
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
|
||||
|
||||
# Copyright (C) 1999-2013 Free Software Foundation, Inc.
|
||||
#
|
||||
# This file is free software; the Free Software Foundation
|
||||
# gives unlimited permission to copy and/or distribute it,
|
||||
# with or without modifications, as long as this notice is preserved.
|
||||
|
||||
# _AM_PROG_CC_C_O
|
||||
# ---------------
|
||||
# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC
|
||||
# to automatically call this.
|
||||
AC_DEFUN([_AM_PROG_CC_C_O],
|
||||
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
|
||||
AC_REQUIRE_AUX_FILE([compile])dnl
|
||||
AC_LANG_PUSH([C])dnl
|
||||
AC_CACHE_CHECK(
|
||||
[whether $CC understands -c and -o together],
|
||||
[am_cv_prog_cc_c_o],
|
||||
[AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
|
||||
# Make sure it works both with $CC and with simple cc.
|
||||
# Following AC_PROG_CC_C_O, we do the test twice because some
|
||||
# compilers refuse to overwrite an existing .o file with -o,
|
||||
# though they will create one.
|
||||
am_cv_prog_cc_c_o=yes
|
||||
for am_i in 1 2; do
|
||||
if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
|
||||
&& test -f conftest2.$ac_objext; then
|
||||
: OK
|
||||
else
|
||||
am_cv_prog_cc_c_o=no
|
||||
break
|
||||
fi
|
||||
done
|
||||
rm -f core conftest*
|
||||
unset am_i])
|
||||
if test "$am_cv_prog_cc_c_o" != yes; then
|
||||
# Losing compiler, so override with the script.
|
||||
# FIXME: It is wrong to rewrite CC.
|
||||
# But if we don't then we get into trouble of one sort or another.
|
||||
# A longer-term fix would be to have automake use am__CC in this case,
|
||||
# and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
|
||||
CC="$am_aux_dir/compile $CC"
|
||||
fi
|
||||
AC_LANG_POP([C])])
|
||||
|
||||
# For backward compatibility.
|
||||
AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
|
||||
|
||||
# Copyright (C) 2001-2013 Free Software Foundation, Inc.
|
||||
#
|
||||
# This file is free software; the Free Software Foundation
|
||||
# gives unlimited permission to copy and/or distribute it,
|
||||
# with or without modifications, as long as this notice is preserved.
|
||||
|
||||
# AM_RUN_LOG(COMMAND)
|
||||
# -------------------
|
||||
# Run COMMAND, save the exit status in ac_status, and log it.
|
||||
# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
|
||||
AC_DEFUN([AM_RUN_LOG],
|
||||
[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
|
||||
($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
|
||||
ac_status=$?
|
||||
echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
|
||||
(exit $ac_status); }])
|
||||
|
||||
# Check to make sure that the build environment is sane. -*- Autoconf -*-
|
||||
|
||||
# Copyright (C) 1996-2013 Free Software Foundation, Inc.
|
||||
@@ -925,76 +1035,114 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
|
||||
# Substitute a variable $(am__untar) that extract such
|
||||
# a tarball read from stdin.
|
||||
# $(am__untar) < result.tar
|
||||
#
|
||||
AC_DEFUN([_AM_PROG_TAR],
|
||||
[# Always define AMTAR for backward compatibility. Yes, it's still used
|
||||
# in the wild :-( We should find a proper way to deprecate it ...
|
||||
AC_SUBST([AMTAR], ['$${TAR-tar}'])
|
||||
m4_if([$1], [v7],
|
||||
[am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
|
||||
[m4_case([$1], [ustar],, [pax],,
|
||||
[m4_fatal([Unknown tar format])])
|
||||
AC_MSG_CHECKING([how to create a $1 tar archive])
|
||||
# Loop over all known methods to create a tar archive until one works.
|
||||
|
||||
# We'll loop over all known methods to create a tar archive until one works.
|
||||
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
|
||||
_am_tools=${am_cv_prog_tar_$1-$_am_tools}
|
||||
# Do not fold the above two line into one, because Tru64 sh and
|
||||
# Solaris sh will not grok spaces in the rhs of '-'.
|
||||
for _am_tool in $_am_tools
|
||||
do
|
||||
case $_am_tool in
|
||||
gnutar)
|
||||
for _am_tar in tar gnutar gtar;
|
||||
do
|
||||
AM_RUN_LOG([$_am_tar --version]) && break
|
||||
done
|
||||
am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
|
||||
am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
|
||||
am__untar="$_am_tar -xf -"
|
||||
;;
|
||||
plaintar)
|
||||
# Must skip GNU tar: if it does not support --format= it doesn't create
|
||||
# ustar tarball either.
|
||||
(tar --version) >/dev/null 2>&1 && continue
|
||||
am__tar='tar chf - "$$tardir"'
|
||||
am__tar_='tar chf - "$tardir"'
|
||||
am__untar='tar xf -'
|
||||
;;
|
||||
pax)
|
||||
am__tar='pax -L -x $1 -w "$$tardir"'
|
||||
am__tar_='pax -L -x $1 -w "$tardir"'
|
||||
am__untar='pax -r'
|
||||
;;
|
||||
cpio)
|
||||
am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
|
||||
am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
|
||||
am__untar='cpio -i -H $1 -d'
|
||||
;;
|
||||
none)
|
||||
am__tar=false
|
||||
am__tar_=false
|
||||
am__untar=false
|
||||
;;
|
||||
esac
|
||||
|
||||
# If the value was cached, stop now. We just wanted to have am__tar
|
||||
# and am__untar set.
|
||||
test -n "${am_cv_prog_tar_$1}" && break
|
||||
m4_if([$1], [v7],
|
||||
[am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
|
||||
|
||||
# tar/untar a dummy directory, and stop if the command works
|
||||
[m4_case([$1],
|
||||
[ustar],
|
||||
[# The POSIX 1988 'ustar' format is defined with fixed-size fields.
|
||||
# There is notably a 21 bits limit for the UID and the GID. In fact,
|
||||
# the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
|
||||
# and bug#13588).
|
||||
am_max_uid=2097151 # 2^21 - 1
|
||||
am_max_gid=$am_max_uid
|
||||
# The $UID and $GID variables are not portable, so we need to resort
|
||||
# to the POSIX-mandated id(1) utility. Errors in the 'id' calls
|
||||
# below are definitely unexpected, so allow the users to see them
|
||||
# (that is, avoid stderr redirection).
|
||||
am_uid=`id -u || echo unknown`
|
||||
am_gid=`id -g || echo unknown`
|
||||
AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
|
||||
if test $am_uid -le $am_max_uid; then
|
||||
AC_MSG_RESULT([yes])
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
_am_tools=none
|
||||
fi
|
||||
AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
|
||||
if test $am_gid -le $am_max_gid; then
|
||||
AC_MSG_RESULT([yes])
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
_am_tools=none
|
||||
fi],
|
||||
|
||||
[pax],
|
||||
[],
|
||||
|
||||
[m4_fatal([Unknown tar format])])
|
||||
|
||||
AC_MSG_CHECKING([how to create a $1 tar archive])
|
||||
|
||||
# Go ahead even if we have the value already cached. We do so because we
|
||||
# need to set the values for the 'am__tar' and 'am__untar' variables.
|
||||
_am_tools=${am_cv_prog_tar_$1-$_am_tools}
|
||||
|
||||
for _am_tool in $_am_tools; do
|
||||
case $_am_tool in
|
||||
gnutar)
|
||||
for _am_tar in tar gnutar gtar; do
|
||||
AM_RUN_LOG([$_am_tar --version]) && break
|
||||
done
|
||||
am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
|
||||
am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
|
||||
am__untar="$_am_tar -xf -"
|
||||
;;
|
||||
plaintar)
|
||||
# Must skip GNU tar: if it does not support --format= it doesn't create
|
||||
# ustar tarball either.
|
||||
(tar --version) >/dev/null 2>&1 && continue
|
||||
am__tar='tar chf - "$$tardir"'
|
||||
am__tar_='tar chf - "$tardir"'
|
||||
am__untar='tar xf -'
|
||||
;;
|
||||
pax)
|
||||
am__tar='pax -L -x $1 -w "$$tardir"'
|
||||
am__tar_='pax -L -x $1 -w "$tardir"'
|
||||
am__untar='pax -r'
|
||||
;;
|
||||
cpio)
|
||||
am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
|
||||
am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
|
||||
am__untar='cpio -i -H $1 -d'
|
||||
;;
|
||||
none)
|
||||
am__tar=false
|
||||
am__tar_=false
|
||||
am__untar=false
|
||||
;;
|
||||
esac
|
||||
|
||||
# If the value was cached, stop now. We just wanted to have am__tar
|
||||
# and am__untar set.
|
||||
test -n "${am_cv_prog_tar_$1}" && break
|
||||
|
||||
# tar/untar a dummy directory, and stop if the command works.
|
||||
rm -rf conftest.dir
|
||||
mkdir conftest.dir
|
||||
echo GrepMe > conftest.dir/file
|
||||
AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
|
||||
rm -rf conftest.dir
|
||||
if test -s conftest.tar; then
|
||||
AM_RUN_LOG([$am__untar <conftest.tar])
|
||||
AM_RUN_LOG([cat conftest.dir/file])
|
||||
grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
|
||||
fi
|
||||
done
|
||||
rm -rf conftest.dir
|
||||
mkdir conftest.dir
|
||||
echo GrepMe > conftest.dir/file
|
||||
AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
|
||||
rm -rf conftest.dir
|
||||
if test -s conftest.tar; then
|
||||
AM_RUN_LOG([$am__untar <conftest.tar])
|
||||
grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
|
||||
fi
|
||||
done
|
||||
rm -rf conftest.dir
|
||||
|
||||
AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
|
||||
AC_MSG_RESULT([$am_cv_prog_tar_$1])])
|
||||
AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
|
||||
AC_MSG_RESULT([$am_cv_prog_tar_$1])])
|
||||
|
||||
AC_SUBST([am__tar])
|
||||
AC_SUBST([am__untar])
|
||||
]) # _AM_PROG_TAR
|
||||
@@ -1002,7 +1150,7 @@ AC_SUBST([am__untar])
|
||||
m4_include([m4/ax_boost_base.m4])
|
||||
m4_include([m4/ax_boost_iostreams.m4])
|
||||
m4_include([m4/ax_boost_system.m4])
|
||||
m4_include([m4/ax_boost_thread.m4])
|
||||
m4_include([m4/ax_check_compile_flag.m4])
|
||||
m4_include([m4/ev++.m4])
|
||||
m4_include([m4/mysql++.m4])
|
||||
m4_include([m4/mysql_loc.m4])
|
||||
|
||||
347 compile (executable file)
@@ -0,0 +1,347 @@
|
||||
#! /bin/sh
|
||||
# Wrapper for compilers which do not understand '-c -o'.
|
||||
|
||||
scriptversion=2012-10-14.11; # UTC
|
||||
|
||||
# Copyright (C) 1999-2013 Free Software Foundation, Inc.
|
||||
# Written by Tom Tromey <tromey@cygnus.com>.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2, or (at your option)
|
||||
# any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# As a special exception to the GNU General Public License, if you
|
||||
# distribute this file as part of a program that contains a
|
||||
# configuration script generated by Autoconf, you may include it under
|
||||
# the same distribution terms that you use for the rest of that program.
|
||||
|
||||
# This file is maintained in Automake, please report
|
||||
# bugs to <bug-automake@gnu.org> or send patches to
|
||||
# <automake-patches@gnu.org>.
|
||||
|
||||
nl='
|
||||
'
|
||||
|
||||
# We need space, tab and new line, in precisely that order. Quoting is
|
||||
# there to prevent tools from complaining about whitespace usage.
|
||||
IFS=" "" $nl"
|
||||
|
||||
file_conv=
|
||||
|
||||
# func_file_conv build_file lazy
|
||||
# Convert a $build file to $host form and store it in $file
|
||||
# Currently only supports Windows hosts. If the determined conversion
|
||||
# type is listed in (the comma separated) LAZY, no conversion will
|
||||
# take place.
|
||||
func_file_conv ()
|
||||
{
|
||||
file=$1
|
||||
case $file in
|
||||
/ | /[!/]*) # absolute file, and not a UNC file
|
||||
if test -z "$file_conv"; then
|
||||
# lazily determine how to convert abs files
|
||||
case `uname -s` in
|
||||
MINGW*)
|
||||
file_conv=mingw
|
||||
;;
|
||||
CYGWIN*)
|
||||
file_conv=cygwin
|
||||
;;
|
||||
*)
|
||||
file_conv=wine
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
case $file_conv/,$2, in
|
||||
*,$file_conv,*)
|
||||
;;
|
||||
mingw/*)
|
||||
file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
|
||||
;;
|
||||
cygwin/*)
|
||||
file=`cygpath -m "$file" || echo "$file"`
|
||||
;;
|
||||
wine/*)
|
||||
file=`winepath -w "$file" || echo "$file"`
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# func_cl_dashL linkdir
|
||||
# Make cl look for libraries in LINKDIR
|
||||
func_cl_dashL ()
|
||||
{
|
||||
func_file_conv "$1"
|
||||
if test -z "$lib_path"; then
|
||||
lib_path=$file
|
||||
else
|
||||
lib_path="$lib_path;$file"
|
||||
fi
|
||||
linker_opts="$linker_opts -LIBPATH:$file"
|
||||
}
|
||||
|
||||
# func_cl_dashl library
|
||||
# Do a library search-path lookup for cl
|
||||
func_cl_dashl ()
|
||||
{
|
||||
lib=$1
|
||||
found=no
|
||||
save_IFS=$IFS
|
||||
IFS=';'
|
||||
for dir in $lib_path $LIB
|
||||
do
|
||||
IFS=$save_IFS
|
||||
if $shared && test -f "$dir/$lib.dll.lib"; then
|
||||
found=yes
|
||||
lib=$dir/$lib.dll.lib
|
||||
break
|
||||
fi
|
||||
if test -f "$dir/$lib.lib"; then
|
||||
found=yes
|
||||
lib=$dir/$lib.lib
|
||||
break
|
||||
fi
|
||||
if test -f "$dir/lib$lib.a"; then
|
||||
found=yes
|
||||
lib=$dir/lib$lib.a
|
||||
break
|
||||
fi
|
||||
done
|
||||
IFS=$save_IFS
|
||||
|
||||
if test "$found" != yes; then
|
||||
lib=$lib.lib
|
||||
fi
|
||||
}
|
||||
|
||||
# func_cl_wrapper cl arg...
|
||||
# Adjust compile command to suit cl
|
||||
func_cl_wrapper ()
|
||||
{
|
||||
# Assume a capable shell
|
||||
lib_path=
|
||||
shared=:
|
||||
linker_opts=
|
||||
for arg
|
||||
do
|
||||
if test -n "$eat"; then
|
||||
eat=
|
||||
else
|
||||
case $1 in
|
||||
-o)
|
||||
# configure might choose to run compile as 'compile cc -o foo foo.c'.
|
||||
eat=1
|
||||
case $2 in
|
||||
*.o | *.[oO][bB][jJ])
|
||||
func_file_conv "$2"
|
||||
set x "$@" -Fo"$file"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
func_file_conv "$2"
|
||||
set x "$@" -Fe"$file"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
-I)
|
||||
eat=1
|
||||
func_file_conv "$2" mingw
|
||||
set x "$@" -I"$file"
|
||||
shift
|
||||
;;
|
||||
-I*)
|
||||
func_file_conv "${1#-I}" mingw
|
||||
set x "$@" -I"$file"
|
||||
shift
|
||||
;;
|
||||
-l)
|
||||
eat=1
|
||||
func_cl_dashl "$2"
|
||||
set x "$@" "$lib"
|
||||
shift
|
||||
;;
|
||||
-l*)
|
||||
func_cl_dashl "${1#-l}"
|
||||
set x "$@" "$lib"
|
||||
shift
|
||||
;;
|
||||
-L)
|
||||
eat=1
|
||||
func_cl_dashL "$2"
|
||||
;;
|
||||
-L*)
|
||||
func_cl_dashL "${1#-L}"
|
||||
;;
|
||||
-static)
|
||||
shared=false
|
||||
;;
|
||||
-Wl,*)
|
||||
arg=${1#-Wl,}
|
||||
save_ifs="$IFS"; IFS=','
|
||||
for flag in $arg; do
|
||||
IFS="$save_ifs"
|
||||
linker_opts="$linker_opts $flag"
|
||||
done
|
||||
IFS="$save_ifs"
|
||||
;;
|
||||
-Xlinker)
|
||||
eat=1
|
||||
linker_opts="$linker_opts $2"
|
||||
;;
|
||||
-*)
|
||||
set x "$@" "$1"
|
||||
shift
|
||||
;;
|
||||
*.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
|
||||
func_file_conv "$1"
|
||||
set x "$@" -Tp"$file"
|
||||
shift
|
||||
;;
|
||||
*.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
|
||||
func_file_conv "$1" mingw
|
||||
set x "$@" "$file"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
set x "$@" "$1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
shift
|
||||
done
|
||||
if test -n "$linker_opts"; then
|
||||
linker_opts="-link$linker_opts"
|
||||
fi
|
||||
exec "$@" $linker_opts
|
||||
exit 1
|
||||
}
|
||||
|
||||
eat=
|
||||
|
||||
case $1 in
|
||||
'')
|
||||
echo "$0: No command. Try '$0 --help' for more information." 1>&2
|
||||
exit 1;
|
||||
;;
|
||||
-h | --h*)
|
||||
cat <<\EOF
|
||||
Usage: compile [--help] [--version] PROGRAM [ARGS]
|
||||
|
||||
Wrapper for compilers which do not understand '-c -o'.
|
||||
Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
|
||||
arguments, and rename the output as expected.
|
||||
|
||||
If you are trying to build a whole package this is not the
|
||||
right script to run: please start by reading the file 'INSTALL'.
|
||||
|
||||
Report bugs to <bug-automake@gnu.org>.
|
||||
EOF
|
||||
exit $?
|
||||
;;
|
||||
-v | --v*)
|
||||
echo "compile $scriptversion"
|
||||
exit $?
|
||||
;;
|
||||
cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
|
||||
func_cl_wrapper "$@" # Doesn't return...
|
||||
;;
|
||||
esac
|
||||
|
||||
ofile=
|
||||
cfile=
|
||||
|
||||
for arg
|
||||
do
|
||||
if test -n "$eat"; then
|
||||
eat=
|
||||
else
|
||||
case $1 in
|
||||
-o)
|
||||
# configure might choose to run compile as 'compile cc -o foo foo.c'.
|
||||
# So we strip '-o arg' only if arg is an object.
|
||||
eat=1
|
||||
case $2 in
|
||||
*.o | *.obj)
|
||||
ofile=$2
|
||||
;;
|
||||
*)
|
||||
set x "$@" -o "$2"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*.c)
|
||||
cfile=$1
|
||||
set x "$@" "$1"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
set x "$@" "$1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
shift
|
||||
done
|
||||
|
||||
if test -z "$ofile" || test -z "$cfile"; then
|
||||
# If no '-o' option was seen then we might have been invoked from a
|
||||
# pattern rule where we don't need one. That is ok -- this is a
|
||||
# normal compilation that the losing compiler can handle. If no
|
||||
# '.c' file was seen then we are probably linking. That is also
|
||||
# ok.
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
# Name of file we expect compiler to create.
|
||||
cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
|
||||
|
||||
# Create the lock directory.
|
||||
# Note: use '[/\\:.-]' here to ensure that we don't use the same name
|
||||
# that we are using for the .o file. Also, base the name on the expected
|
||||
# object file name, since that is what matters with a parallel build.
|
||||
lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
|
||||
while true; do
|
||||
if mkdir "$lockdir" >/dev/null 2>&1; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
# FIXME: race condition here if user kills between mkdir and trap.
|
||||
trap "rmdir '$lockdir'; exit 1" 1 2 15
|
||||
|
||||
# Run the compile.
|
||||
"$@"
|
||||
ret=$?
|
||||
|
||||
if test -f "$cofile"; then
|
||||
test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
|
||||
elif test -f "${cofile}bj"; then
|
||||
test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
|
||||
fi
|
||||
|
||||
rmdir "$lockdir"
|
||||
exit $ret
|
||||
|
||||
# Local Variables:
|
||||
# mode: shell-script
|
||||
# sh-indentation: 2
|
||||
# eval: (add-hook 'write-file-hooks 'time-stamp)
|
||||
# time-stamp-start: "scriptversion="
|
||||
# time-stamp-format: "%:y-%02m-%02d.%02H"
|
||||
# time-stamp-time-zone: "UTC"
|
||||
# time-stamp-end: "; # UTC"
|
||||
# End:
|
||||
@@ -25,4 +25,7 @@ config::config() {
	site_host = "localhost";
	site_password = "********************************"; // MUST BE 32 CHARS
	site_path = ""; // If the site is not running under the domain root

	// Key to use for /report?get=stats and /report?get=user&key=<passkey> requests
	report_password = "********************************"; // MUST BE 32 CHARS
}
2 config.h
@@ -30,6 +30,8 @@ class config {
	std::string site_password;
	std::string site_path;

	std::string report_password;

	config();
};
configure.ac
@@ -1,18 +1,17 @@
AC_INIT(ocelot, 0.6)
AC_INIT(ocelot, 0.7)
AM_INIT_AUTOMAKE([1.11 no-define foreign])
CXXFLAGS="$CXXFLAGS -std=c++0x -march=native -O2 -fomit-frame-pointer -fno-ident -fvisibility-inlines-hidden -fvisibility=hidden -Wall -Wfatal-errors -iquote -Wl,O1 -Wl,--as-needed"
AC_PROG_CXX
AC_CONFIG_FILES([Makefile])

AX_BOOST_BASE([1.37], [], [AC_MSG_ERROR("Need boost >= 1.37")])
AX_CHECK_COMPILE_FLAG([-std=c++11], [], [AC_MSG_ERROR([Compiler does not support -std=c++11])])
AX_BOOST_BASE([1.37], [], [AC_MSG_ERROR(Need boost >= 1.37)])
AX_BOOST_IOSTREAMS
AX_BOOST_SYSTEM
AX_BOOST_THREAD
MYSQL_C_API_LOCATION
AC_LANG_PUSH(C++)
MYSQLPP_DEVEL
EV_DEVEL
TCMALLOC
AC_LANG_POP(C++)
TCMALLOC

AC_OUTPUT
81 db.cpp
@@ -1,14 +1,14 @@
|
||||
#include "ocelot.h"
|
||||
#include "db.h"
|
||||
#include "user.h"
|
||||
#include "misc_functions.h"
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <queue>
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/thread/locks.hpp>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
|
||||
#define DB_LOCK_TIMEOUT 50
|
||||
@@ -52,7 +52,7 @@ void mysql::clear_peer_data() {
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::load_torrents(std::unordered_map<std::string, torrent> &torrents) {
|
||||
void mysql::load_torrents(torrent_list &torrents) {
|
||||
mysqlpp::Query query = conn.query("SELECT ID, info_hash, freetorrent, Snatched FROM torrents ORDER BY ID;");
|
||||
if (mysqlpp::StoreQueryResult res = query.store()) {
|
||||
mysqlpp::String one("1"); // Hack to get around bug in mysql++3.0.0
|
||||
@@ -79,31 +79,29 @@ void mysql::load_torrents(std::unordered_map<std::string, torrent> &torrents) {
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::load_users(std::unordered_map<std::string, user> &users) {
|
||||
void mysql::load_users(user_list &users) {
|
||||
mysqlpp::Query query = conn.query("SELECT ID, can_leech, torrent_pass, visible FROM users_main WHERE Enabled='1';");
|
||||
if (mysqlpp::StoreQueryResult res = query.store()) {
|
||||
size_t num_rows = res.num_rows();
|
||||
for (size_t i = 0; i < num_rows; i++) {
|
||||
std::string passkey;
|
||||
res[i][2].to_string(passkey);
|
||||
bool protect_ip = res[i][3].compare("1") != 0;
|
||||
|
||||
user u;
|
||||
u.id = res[i][0];
|
||||
u.can_leech = res[i][1];
|
||||
u.protect_ip = res[i][3].compare("1") != 0;
|
||||
users[passkey] = u;
|
||||
user_ptr u(new user(res[i][0], res[i][1], protect_ip));
|
||||
users.insert(std::pair<std::string, user_ptr>(passkey, u));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::load_tokens(std::unordered_map<std::string, torrent> &torrents) {
|
||||
void mysql::load_tokens(torrent_list &torrents) {
|
||||
mysqlpp::Query query = conn.query("SELECT uf.UserID, t.info_hash FROM users_freeleeches AS uf JOIN torrents AS t ON t.ID = uf.TorrentID WHERE uf.Expired = '0';");
|
||||
if (mysqlpp::StoreQueryResult res = query.store()) {
|
||||
size_t num_rows = res.num_rows();
|
||||
for (size_t i = 0; i < num_rows; i++) {
|
||||
std::string info_hash;
|
||||
res[i][1].to_string(info_hash);
|
||||
std::unordered_map<std::string, torrent>::iterator it = torrents.find(info_hash);
|
||||
torrent_list::iterator it = torrents.find(info_hash);
|
||||
if (it != torrents.end()) {
|
||||
torrent &tor = it->second;
|
||||
tor.tokened_users.insert(res[i][0]);
|
||||
@@ -138,6 +136,7 @@ void mysql::record_user(std::string &record) {
|
||||
}
|
||||
|
||||
void mysql::record_torrent(std::string &record) {
|
||||
std::unique_lock<std::mutex> tb_lock(torrent_buffer_lock);
|
||||
if (update_torrent_buffer != "") {
|
||||
update_torrent_buffer += ",";
|
||||
}
|
||||
@@ -163,11 +162,13 @@ void mysql::record_peer(std::string &record, std::string &peer_id) {
|
||||
update_light_peer_buffer += q.str();
|
||||
}
|
||||
|
||||
void mysql::record_snatch(std::string &record) {
|
||||
void mysql::record_snatch(std::string &record, std::string &ip) {
|
||||
if (update_snatch_buffer != "") {
|
||||
update_snatch_buffer += ",";
|
||||
}
|
||||
update_snatch_buffer += record;
|
||||
mysqlpp::Query q = conn.query();
|
||||
q << record << ',' << mysqlpp::quote << ip << ')';
|
||||
update_snatch_buffer += q.str();
|
||||
}
|
||||
|
||||
bool mysql::all_clear() {
|
||||
@@ -184,7 +185,7 @@ void mysql::flush() {
|
||||
|
||||
void mysql::flush_users() {
|
||||
std::string sql;
|
||||
boost::mutex::scoped_lock lock(user_queue_lock);
|
||||
std::unique_lock<std::mutex> uq_lock(user_queue_lock);
|
||||
size_t qsize = user_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "User flush queue size: " << qsize << std::endl;
|
||||
@@ -197,13 +198,15 @@ void mysql::flush_users() {
|
||||
user_queue.push(sql);
|
||||
update_user_buffer.clear();
|
||||
if (u_active == false) {
|
||||
boost::thread thread(&mysql::do_flush_users, this);
|
||||
std::thread thread(&mysql::do_flush_users, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::flush_torrents() {
|
||||
std::string sql;
|
||||
boost::mutex::scoped_lock lock(torrent_queue_lock);
|
||||
std::unique_lock<std::mutex> tq_lock(torrent_queue_lock);
|
||||
std::unique_lock<std::mutex> tb_lock(torrent_buffer_lock);
|
||||
size_t qsize = torrent_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "Torrent flush queue size: " << qsize << std::endl;
|
||||
@@ -221,13 +224,14 @@ void mysql::flush_torrents() {
|
||||
sql = "DELETE FROM torrents WHERE info_hash = ''";
|
||||
torrent_queue.push(sql);
|
||||
if (t_active == false) {
|
||||
boost::thread thread(&mysql::do_flush_torrents, this);
|
||||
std::thread thread(&mysql::do_flush_torrents, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::flush_snatches() {
|
||||
std::string sql;
|
||||
boost::mutex::scoped_lock lock(snatch_queue_lock);
|
||||
std::unique_lock<std::mutex> sq_lock(snatch_queue_lock);
|
||||
size_t qsize = snatch_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "Snatch flush queue size: " << qsize << std::endl;
|
||||
@@ -239,22 +243,18 @@ void mysql::flush_snatches() {
|
||||
snatch_queue.push(sql);
|
||||
update_snatch_buffer.clear();
|
||||
if (s_active == false) {
|
||||
boost::thread thread(&mysql::do_flush_snatches, this);
|
||||
std::thread thread(&mysql::do_flush_snatches, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::flush_peers() {
|
||||
std::string sql;
|
||||
boost::mutex::scoped_lock lock(peer_queue_lock);
|
||||
std::unique_lock<std::mutex> pq_lock(peer_queue_lock);
|
||||
size_t qsize = peer_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "Peer flush queue size: " << qsize << std::endl;
|
||||
}
|
||||
// because xfu inserts are slow and ram is not infinite we need to
|
||||
// limit this queue's size
|
||||
if (qsize >= 1000) {
|
||||
peer_queue.pop();
|
||||
}
|
||||
|
||||
// Nothing to do
|
||||
if (update_light_peer_buffer == "" && update_heavy_peer_buffer == "") {
|
||||
@@ -268,6 +268,13 @@ void mysql::flush_peers() {
|
||||
}
|
||||
|
||||
if (update_heavy_peer_buffer != "") {
|
||||
// Because xfu inserts are slow and ram is not infinite we need to
|
||||
// limit this queue's size
|
||||
// xfu will be messed up if the light query inserts a new row,
|
||||
// but that's better than an oom crash
|
||||
if (qsize >= 1000) {
|
||||
peer_queue.pop();
|
||||
}
|
||||
sql = "INSERT INTO xbt_files_users (uid,fid,active,uploaded,downloaded,upspeed,downspeed,remaining,corrupt," +
|
||||
std::string("timespent,announced,ip,peer_id,useragent,mtime) VALUES ") + update_heavy_peer_buffer +
|
||||
" ON DUPLICATE KEY UPDATE active=VALUES(active), uploaded=VALUES(uploaded), " +
|
||||
@@ -280,6 +287,10 @@ void mysql::flush_peers() {
|
||||
sql.clear();
|
||||
}
|
||||
if (update_light_peer_buffer != "") {
|
||||
// See comment above
|
||||
if (qsize >= 1000) {
|
||||
peer_queue.pop();
|
||||
}
|
||||
sql = "INSERT INTO xbt_files_users (fid,timespent,announced,peer_id,mtime) VALUES " +
|
||||
update_light_peer_buffer +
|
||||
" ON DUPLICATE KEY UPDATE upspeed=0, downspeed=0, timespent=VALUES(timespent), " +
|
||||
@@ -290,13 +301,14 @@ void mysql::flush_peers() {
|
||||
}
|
||||
|
||||
if (p_active == false) {
|
||||
boost::thread thread(&mysql::do_flush_peers, this);
|
||||
std::thread thread(&mysql::do_flush_peers, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
void mysql::flush_tokens() {
|
||||
std::string sql;
|
||||
boost::mutex::scoped_lock lock(token_queue_lock);
|
||||
std::unique_lock<std::mutex> tq_lock(token_queue_lock);
|
||||
size_t qsize = token_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "Token flush queue size: " << qsize << std::endl;
|
||||
@@ -309,7 +321,8 @@ void mysql::flush_tokens() {
|
||||
token_queue.push(sql);
|
||||
update_token_buffer.clear();
|
||||
if (tok_active == false) {
|
||||
boost::thread(&mysql::do_flush_tokens, this);
|
||||
std::thread thread(&mysql::do_flush_tokens, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -326,7 +339,7 @@ void mysql::do_flush_users() {
|
||||
sleep(3);
|
||||
continue;
|
||||
} else {
|
||||
boost::mutex::scoped_lock lock(user_queue_lock);
|
||||
std::unique_lock<std::mutex> uq_lock(user_queue_lock);
|
||||
user_queue.pop();
|
||||
}
|
||||
}
|
||||
@@ -366,7 +379,7 @@ void mysql::do_flush_torrents() {
|
||||
sleep(3);
|
||||
continue;
|
||||
} else {
|
||||
boost::mutex::scoped_lock lock(torrent_queue_lock);
|
||||
std::unique_lock<std::mutex> tq_lock(torrent_queue_lock);
|
||||
torrent_queue.pop();
|
||||
}
|
||||
}
|
||||
@@ -402,7 +415,7 @@ void mysql::do_flush_peers() {
|
||||
sleep(3);
|
||||
continue;
|
||||
} else {
|
||||
boost::mutex::scoped_lock lock(peer_queue_lock);
|
||||
std::unique_lock<std::mutex> pq_lock(peer_queue_lock);
|
||||
peer_queue.pop();
|
||||
}
|
||||
}
|
||||
@@ -438,7 +451,7 @@ void mysql::do_flush_snatches() {
|
||||
sleep(3);
|
||||
continue;
|
||||
} else {
|
||||
boost::mutex::scoped_lock lock(snatch_queue_lock);
|
||||
std::unique_lock<std::mutex> sq_lock(snatch_queue_lock);
|
||||
snatch_queue.pop();
|
||||
}
|
||||
}
|
||||
@@ -474,7 +487,7 @@ void mysql::do_flush_tokens() {
|
||||
sleep(3);
|
||||
continue;
|
||||
} else {
|
||||
boost::mutex::scoped_lock lock(token_queue_lock);
|
||||
std::unique_lock<std::mutex> tq_lock(token_queue_lock);
|
||||
token_queue.pop();
|
||||
}
|
||||
}
|
||||
|
||||
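The db.cpp hunks above swap boost::thread and boost::mutex::scoped_lock for std::thread and std::unique_lock<std::mutex>, with each flush spawning a detached worker thread when none is active. Below is a condensed, self-contained sketch of that pattern; the names user_queue, user_queue_lock, u_active and do_flush_users mirror the diff, but the bodies are simplified stand-ins, not Ocelot's actual implementation.

#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

std::mutex user_queue_lock;          // protects user_queue (was boost::mutex)
std::queue<std::string> user_queue;  // batched SQL statements waiting to be flushed
bool u_active = false;               // true while a background flush thread is running

// Simplified stand-in for mysql::do_flush_users(): drain the queue in the background.
void do_flush_users() {
	std::unique_lock<std::mutex> uq_lock(user_queue_lock);
	while (!user_queue.empty()) {
		std::cout << "flushing: " << user_queue.front() << std::endl;
		user_queue.pop();
	}
	u_active = false;
}

// Mirrors the shape of mysql::flush_users() after the change.
void flush_users(const std::string &sql) {
	std::unique_lock<std::mutex> uq_lock(user_queue_lock);  // was boost::mutex::scoped_lock
	user_queue.push(sql);
	if (u_active == false) {
		u_active = true;
		std::thread thread(do_flush_users);  // was boost::thread(&mysql::do_flush_users, this)
		thread.detach();
	}
}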
23 db.h
@@ -5,7 +5,7 @@
#include <string>
#include <unordered_map>
#include <queue>
#include <boost/thread/mutex.hpp>
#include <mutex>

class mysql {
	private:
@@ -28,11 +28,12 @@ class mysql {

		// These locks prevent more than one thread from reading/writing the buffers.
		// These should be held for the minimum time possible.
		boost::mutex user_queue_lock;
		boost::mutex torrent_queue_lock;
		boost::mutex peer_queue_lock;
		boost::mutex snatch_queue_lock;
		boost::mutex token_queue_lock;
		std::mutex user_queue_lock;
		std::mutex torrent_buffer_lock;
		std::mutex torrent_queue_lock;
		std::mutex peer_queue_lock;
		std::mutex snatch_queue_lock;
		std::mutex token_queue_lock;

		void do_flush_users();
		void do_flush_torrents();
@@ -51,14 +52,14 @@ class mysql {
		bool verbose_flush;

		mysql(std::string mysql_db, std::string mysql_host, std::string username, std::string password);
		void load_torrents(std::unordered_map<std::string, torrent> &torrents);
		void load_users(std::unordered_map<std::string, user> &users);
		void load_tokens(std::unordered_map<std::string, torrent> &torrents);
		void load_torrents(torrent_list &torrents);
		void load_users(user_list &users);
		void load_tokens(torrent_list &torrents);
		void load_whitelist(std::vector<std::string> &whitelist);

		void record_user(std::string &record); // (id,uploaded_change,downloaded_change)
		void record_torrent(std::string &record); // (id,seeders,leechers,snatched_change,balance)
		void record_snatch(std::string &record); // (uid,fid,tstamp)
		void record_snatch(std::string &record, std::string &ip); // (uid,fid,tstamp)
		void record_peer(std::string &record, std::string &ip, std::string &peer_id, std::string &useragent); // (uid,fid,active,peerid,useragent,ip,uploaded,downloaded,upspeed,downspeed,left,timespent,announces,tstamp)
		void record_peer(std::string &record, std::string &peer_id); // (fid,peerid,timespent,announces,tstamp)
		void record_token(std::string &record);
@@ -67,7 +68,7 @@ class mysql {

		bool all_clear();

		boost::mutex torrent_list_mutex;
		std::mutex torrent_list_mutex;
};

#pragma GCC visibility pop
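The db.h hunk above only renames the container types; judging from the old spelled-out signatures and the user_ptr insertions in the db.cpp hunk, torrent_list and user_list are most likely aliases for the unordered maps that were previously written out, with users now held through a shared pointer. The sketch below is an assumption about those definitions (they live elsewhere in the tree, probably ocelot.h and user.h, and are not part of this diff).

#include <memory>
#include <string>
#include <unordered_map>

struct torrent { /* stub; the real struct is defined elsewhere in Ocelot */ };
struct user { /* stub; the real class is defined elsewhere in Ocelot */ };

// Assumed aliases matching the old spelled-out signatures in db.h.
typedef std::shared_ptr<user> user_ptr;
typedef std::unordered_map<std::string, torrent> torrent_list;  // info_hash -> torrent
typedef std::unordered_map<std::string, user_ptr> user_list;    // passkey -> user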
44 events.cpp
@@ -5,8 +5,7 @@
|
||||
#include "events.h"
|
||||
#include "schedule.h"
|
||||
#include <cerrno>
|
||||
|
||||
|
||||
#include <mutex>
|
||||
|
||||
// Define the connection mother (first half) and connection middlemen (second half)
|
||||
|
||||
@@ -15,9 +14,6 @@
|
||||
//---------- Connection mother - spawns middlemen and lets them deal with the connection
|
||||
|
||||
connection_mother::connection_mother(worker * worker_obj, config * config_obj, mysql * db_obj, site_comm * sc_obj) : work(worker_obj), conf(config_obj), db(db_obj), sc(sc_obj) {
|
||||
open_connections = 0;
|
||||
opened_connections = 0;
|
||||
|
||||
memset(&address, 0, sizeof(address));
|
||||
addr_len = sizeof(address);
|
||||
|
||||
@@ -74,8 +70,10 @@ connection_mother::connection_mother(worker * worker_obj, config * config_obj, m
|
||||
|
||||
void connection_mother::handle_connect(ev::io &watcher, int events_flags) {
|
||||
// Spawn a new middleman
|
||||
if (open_connections < conf->max_middlemen) {
|
||||
opened_connections++;
|
||||
if (stats.open_connections < conf->max_middlemen) {
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.opened_connections++;
|
||||
lock.unlock();
|
||||
new connection_middleman(listen_socket, address, addr_len, work, this, conf);
|
||||
}
|
||||
}
|
||||
@@ -94,13 +92,14 @@ connection_mother::~connection_mother()
|
||||
//---------- Connection middlemen - these little guys live until their connection is closed
|
||||
|
||||
connection_middleman::connection_middleman(int &listen_socket, sockaddr_in &address, socklen_t &addr_len, worker * new_work, connection_mother * mother_arg, config * config_obj) :
|
||||
conf(config_obj), mother (mother_arg), work(new_work), gzip(false) {
|
||||
conf(config_obj), mother (mother_arg), work(new_work) {
|
||||
|
||||
connect_sock = accept(listen_socket, (sockaddr *) &address, &addr_len);
|
||||
if (connect_sock == -1) {
|
||||
std::cout << "Accept failed, errno " << errno << ": " << strerror(errno) << std::endl;
|
||||
mother->increment_open_connections(); // destructor decrements open connections
|
||||
delete this;
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.open_connections++; // destructor decrements open connections
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -127,12 +126,14 @@ connection_middleman::connection_middleman(int &listen_socket, sockaddr_in &addr
|
||||
timeout_event.set(conf->timeout_interval, 0);
|
||||
timeout_event.start();
|
||||
|
||||
mother->increment_open_connections();
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.open_connections++;
|
||||
}
|
||||
|
||||
connection_middleman::~connection_middleman() {
|
||||
close(connect_sock);
|
||||
mother->decrement_open_connections();
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.open_connections--;
|
||||
}
|
||||
|
||||
// Handler to read data from the socket, called by event loop when socket is readable
|
||||
@@ -141,13 +142,15 @@ void connection_middleman::handle_read(ev::io &watcher, int events_flags) {
|
||||
|
||||
char buffer[conf->max_read_buffer + 1];
|
||||
memset(buffer, 0, conf->max_read_buffer + 1);
|
||||
int status = recv(connect_sock, &buffer, conf->max_read_buffer, 0);
|
||||
int ret = recv(connect_sock, &buffer, conf->max_read_buffer, 0);
|
||||
|
||||
if (status == -1) {
|
||||
if (ret == -1) {
|
||||
delete this;
|
||||
return;
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.bytes_read += ret;
|
||||
lock.unlock();
|
||||
std::string stringbuf = buffer;
|
||||
|
||||
char ip[INET_ADDRSTRLEN];
|
||||
@@ -155,7 +158,7 @@ void connection_middleman::handle_read(ev::io &watcher, int events_flags) {
|
||||
std::string ip_str = ip;
|
||||
|
||||
//--- CALL WORKER
|
||||
response = work->work(stringbuf, ip_str, gzip);
|
||||
response = work->work(stringbuf, ip_str);
|
||||
|
||||
// Find out when the socket is writeable.
|
||||
// The loop in connection_mother will call handle_write when it is.
|
||||
@@ -167,13 +170,10 @@ void connection_middleman::handle_read(ev::io &watcher, int events_flags) {
|
||||
void connection_middleman::handle_write(ev::io &watcher, int events_flags) {
|
||||
write_event.stop();
|
||||
timeout_event.stop();
|
||||
std::string http_response = "HTTP/1.1 200 OK\r\nServer: Ocelot 1.0\r\nContent-Type: text/plain\r\n";
|
||||
if (gzip) {
|
||||
http_response += "Content-Encoding: gzip\r\n";
|
||||
}
|
||||
http_response += "Connection: close\r\n\r\n";
|
||||
http_response += response;
|
||||
send(connect_sock, http_response.c_str(), http_response.size(), MSG_NOSIGNAL);
|
||||
send(connect_sock, response.c_str(), response.size(), MSG_NOSIGNAL);
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.bytes_written += response.size();
|
||||
lock.unlock();
|
||||
delete this;
|
||||
}
|
||||
|
||||
|
||||
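The events.cpp hunks above replace the per-object connection counters with a shared stats object guarded by a std::mutex (stats.open_connections, stats.opened_connections, stats.bytes_read, stats.bytes_written), which is what the "Collect some stats" changelog entry refers to. A minimal sketch of what that object could look like follows; the field names come from the diff, everything else is an assumption.

#include <cstdint>
#include <mutex>

// Assumed shape of the shared stats object referenced in events.cpp.
struct stats_t {
	std::mutex mutex;                   // guards the counters below
	unsigned int open_connections = 0;  // currently open middlemen
	uint64_t opened_connections = 0;    // total connections accepted since startup
	uint64_t bytes_read = 0;
	uint64_t bytes_written = 0;
};

extern stats_t stats;  // a single instance, defined once elsewhere (e.g. ocelot.cpp)

// Usage mirroring connection_middleman::handle_read():
//   std::unique_lock<std::mutex> lock(stats.mutex);
//   stats.bytes_read += ret;
//   lock.unlock();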
15
events.h
15
events.h
@@ -56,18 +56,8 @@ class connection_mother {
	site_comm * sc;
	ev::timer schedule_event;

	unsigned int open_connections;
	uint64_t opened_connections;

	public:
		connection_mother(worker * worker_obj, config * config_obj, mysql * db_obj, site_comm * sc_obj);

		void increment_open_connections() { open_connections++; }
		void decrement_open_connections() { open_connections--; }

		unsigned int get_open_connections() { return open_connections; }
		uint64_t get_opened_connections() { return opened_connections; }

		void handle_connect(ev::io &watcher, int events_flags);
		~connection_mother();
};
@@ -87,7 +77,6 @@ class connection_middleman {
	connection_mother * mother;
	worker * work;
	sockaddr_in client_addr;
	bool gzip;

	public:
		connection_middleman(int &listen_socket, sockaddr_in &address, socklen_t &addr_len, worker* work, connection_mother * mother_arg, config * config_obj);
@@ -97,7 +86,3 @@ class connection_middleman {
		void handle_write(ev::io &watcher, int events_flags);
		void handle_timeout(ev::timer &watcher, int events_flags);
};
@@ -31,7 +31,7 @@
|
||||
# and this notice are preserved. This file is offered as-is, without any
|
||||
# warranty.
|
||||
|
||||
#serial 15
|
||||
#serial 17
|
||||
|
||||
AC_DEFUN([AX_BOOST_SYSTEM],
|
||||
[
|
||||
@@ -83,14 +83,14 @@ AC_DEFUN([AX_BOOST_SYSTEM],
|
||||
|
||||
LDFLAGS_SAVE=$LDFLAGS
|
||||
if test "x$ax_boost_user_system_lib" = "x"; then
|
||||
for libextension in `ls $BOOSTLIBDIR/libboost_system*.a* $BOOSTLIBDIR/libboost_system*.so* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_system.*\)\.\(so\|a\).*$;\1;' | tac` ; do
|
||||
for libextension in `ls -r $BOOSTLIBDIR/libboost_system* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do
|
||||
ax_lib=${libextension}
|
||||
AC_CHECK_LIB($ax_lib, exit,
|
||||
[BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_SYSTEM_LIB) link_system="yes"; break],
|
||||
[link_system="no"])
|
||||
done
|
||||
if test "x$link_system" != "xyes"; then
|
||||
for libextension in `ls $BOOSTLIBDIR/boost_system*.{dll,a}* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_system.*\)\.\(dll\|a\).*$;\1;' | tac` ; do
|
||||
for libextension in `ls -r $BOOSTLIBDIR/boost_system* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do
|
||||
ax_lib=${libextension}
|
||||
AC_CHECK_LIB($ax_lib, exit,
|
||||
[BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST(BOOST_SYSTEM_LIB) link_system="yes"; break],
|
||||
|
||||
@@ -1,149 +0,0 @@
|
||||
# ===========================================================================
|
||||
# http://www.gnu.org/software/autoconf-archive/ax_boost_thread.html
|
||||
# ===========================================================================
|
||||
#
|
||||
# SYNOPSIS
|
||||
#
|
||||
# AX_BOOST_THREAD
|
||||
#
|
||||
# DESCRIPTION
|
||||
#
|
||||
# Test for Thread library from the Boost C++ libraries. The macro requires
|
||||
# a preceding call to AX_BOOST_BASE. Further documentation is available at
|
||||
# <http://randspringer.de/boost/index.html>.
|
||||
#
|
||||
# This macro calls:
|
||||
#
|
||||
# AC_SUBST(BOOST_THREAD_LIB)
|
||||
#
|
||||
# And sets:
|
||||
#
|
||||
# HAVE_BOOST_THREAD
|
||||
#
|
||||
# LICENSE
|
||||
#
|
||||
# Copyright (c) 2009 Thomas Porschberg <thomas@randspringer.de>
|
||||
# Copyright (c) 2009 Michael Tindal
|
||||
#
|
||||
# Copying and distribution of this file, with or without modification, are
|
||||
# permitted in any medium without royalty provided the copyright notice
|
||||
# and this notice are preserved. This file is offered as-is, without any
|
||||
# warranty.
|
||||
|
||||
#serial 25
|
||||
|
||||
AC_DEFUN([AX_BOOST_THREAD],
|
||||
[
|
||||
AC_ARG_WITH([boost-thread],
|
||||
AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@],
|
||||
[use the Thread library from boost - it is possible to specify a certain library for the linker
|
||||
e.g. --with-boost-thread=boost_thread-gcc-mt ]),
|
||||
[
|
||||
if test "$withval" = "no"; then
|
||||
want_boost="no"
|
||||
elif test "$withval" = "yes"; then
|
||||
want_boost="yes"
|
||||
ax_boost_user_thread_lib=""
|
||||
else
|
||||
want_boost="yes"
|
||||
ax_boost_user_thread_lib="$withval"
|
||||
fi
|
||||
],
|
||||
[want_boost="yes"]
|
||||
)
|
||||
|
||||
if test "x$want_boost" = "xyes"; then
|
||||
AC_REQUIRE([AC_PROG_CC])
|
||||
AC_REQUIRE([AC_CANONICAL_BUILD])
|
||||
CPPFLAGS_SAVED="$CPPFLAGS"
|
||||
CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
|
||||
export CPPFLAGS
|
||||
|
||||
LDFLAGS_SAVED="$LDFLAGS"
|
||||
LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
|
||||
export LDFLAGS
|
||||
|
||||
AC_CACHE_CHECK(whether the Boost::Thread library is available,
|
||||
ax_cv_boost_thread,
|
||||
[AC_LANG_PUSH([C++])
|
||||
CXXFLAGS_SAVE=$CXXFLAGS
|
||||
|
||||
if test "x$host_os" = "xsolaris" ; then
|
||||
CXXFLAGS="-pthreads $CXXFLAGS"
|
||||
elif test "x$host_os" = "xmingw32" ; then
|
||||
CXXFLAGS="-mthreads $CXXFLAGS"
|
||||
else
|
||||
CXXFLAGS="-pthread $CXXFLAGS"
|
||||
fi
|
||||
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/thread/thread.hpp>]],
|
||||
[[boost::thread_group thrds;
|
||||
return 0;]])],
|
||||
ax_cv_boost_thread=yes, ax_cv_boost_thread=no)
|
||||
CXXFLAGS=$CXXFLAGS_SAVE
|
||||
AC_LANG_POP([C++])
|
||||
])
|
||||
if test "x$ax_cv_boost_thread" = "xyes"; then
|
||||
if test "x$host_os" = "xsolaris" ; then
|
||||
BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS"
|
||||
elif test "x$host_os" = "xmingw32" ; then
|
||||
BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS"
|
||||
else
|
||||
BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS"
|
||||
fi
|
||||
|
||||
AC_SUBST(BOOST_CPPFLAGS)
|
||||
|
||||
AC_DEFINE(HAVE_BOOST_THREAD,,[define if the Boost::Thread library is available])
|
||||
BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
|
||||
|
||||
LDFLAGS_SAVE=$LDFLAGS
|
||||
case "x$host_os" in
|
||||
*bsd* )
|
||||
LDFLAGS="-pthread $LDFLAGS"
|
||||
break;
|
||||
;;
|
||||
esac
|
||||
if test "x$ax_boost_user_thread_lib" = "x"; then
|
||||
for libextension in `ls $BOOSTLIBDIR/libboost_thread*.so* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_thread.*\)\.so.*$;\1;'` `ls $BOOSTLIBDIR/libboost_thread*.dylib* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(libboost_thread.*\)\.dylib.*$;\1;'` `ls $BOOSTLIBDIR/libboost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_thread.*\)\.a.*$;\1;'`; do
|
||||
ax_lib=${libextension}
|
||||
AC_CHECK_LIB($ax_lib, exit,
|
||||
[BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
|
||||
[link_thread="no"])
|
||||
done
|
||||
if test "x$link_thread" != "xyes"; then
|
||||
for libextension in `ls $BOOSTLIBDIR/boost_thread*.dll* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_thread.*\)\.dll.*$;\1;'` `ls $BOOSTLIBDIR/boost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_thread.*\)\.a.*$;\1;'` ; do
|
||||
ax_lib=${libextension}
|
||||
AC_CHECK_LIB($ax_lib, exit,
|
||||
[BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
|
||||
[link_thread="no"])
|
||||
done
|
||||
fi
|
||||
|
||||
else
|
||||
for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do
|
||||
AC_CHECK_LIB($ax_lib, exit,
|
||||
[BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST(BOOST_THREAD_LIB) link_thread="yes"; break],
|
||||
[link_thread="no"])
|
||||
done
|
||||
|
||||
fi
|
||||
if test "x$ax_lib" = "x"; then
|
||||
AC_MSG_ERROR(Could not find a version of the library!)
|
||||
fi
|
||||
if test "x$link_thread" = "xno"; then
|
||||
AC_MSG_ERROR(Could not link against $ax_lib !)
|
||||
else
|
||||
case "x$host_os" in
|
||||
*bsd* )
|
||||
BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS"
|
||||
break;
|
||||
;;
|
||||
esac
|
||||
|
||||
fi
|
||||
fi
|
||||
|
||||
CPPFLAGS="$CPPFLAGS_SAVED"
|
||||
LDFLAGS="$LDFLAGS_SAVED"
|
||||
fi
|
||||
])
|
||||
72
m4/ax_check_compile_flag.m4
Normal file
72
m4/ax_check_compile_flag.m4
Normal file
@@ -0,0 +1,72 @@
|
||||
# ===========================================================================
|
||||
# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html
|
||||
# ===========================================================================
|
||||
#
|
||||
# SYNOPSIS
|
||||
#
|
||||
# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS])
|
||||
#
|
||||
# DESCRIPTION
|
||||
#
|
||||
# Check whether the given FLAG works with the current language's compiler
|
||||
# or gives an error. (Warnings, however, are ignored)
|
||||
#
|
||||
# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
|
||||
# success/failure.
|
||||
#
|
||||
# If EXTRA-FLAGS is defined, it is added to the current language's default
|
||||
# flags (e.g. CFLAGS) when the check is done. The check is thus made with
|
||||
# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to
|
||||
# force the compiler to issue an error when a bad flag is given.
|
||||
#
|
||||
# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this
|
||||
# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG.
|
||||
#
|
||||
# LICENSE
|
||||
#
|
||||
# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
|
||||
# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License as published by the
|
||||
# Free Software Foundation, either version 3 of the License, or (at your
|
||||
# option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
|
||||
# Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# As a special exception, the respective Autoconf Macro's copyright owner
|
||||
# gives unlimited permission to copy, distribute and modify the configure
|
||||
# scripts that are the output of Autoconf when processing the Macro. You
|
||||
# need not follow the terms of the GNU General Public License when using
|
||||
# or distributing such scripts, even though portions of the text of the
|
||||
# Macro appear in them. The GNU General Public License (GPL) does govern
|
||||
# all other use of the material that constitutes the Autoconf Macro.
|
||||
#
|
||||
# This special exception to the GPL applies to versions of the Autoconf
|
||||
# Macro released by the Autoconf Archive. When you make and distribute a
|
||||
# modified version of the Autoconf Macro, you may extend this special
|
||||
# exception to the GPL to apply to your modified version as well.
|
||||
|
||||
#serial 2
|
||||
|
||||
AC_DEFUN([AX_CHECK_COMPILE_FLAG],
|
||||
[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX
|
||||
AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl
|
||||
AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [
|
||||
ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
|
||||
_AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1"
|
||||
AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
|
||||
[AS_VAR_SET(CACHEVAR,[yes])],
|
||||
[AS_VAR_SET(CACHEVAR,[no])])
|
||||
_AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags])
|
||||
AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes],
|
||||
[m4_default([$2], :)],
|
||||
[m4_default([$3], :)])
|
||||
AS_VAR_POPDEF([CACHEVAR])dnl
|
||||
])dnl AX_CHECK_COMPILE_FLAGS
|
||||
@@ -91,7 +91,7 @@ AC_DEFUN([EV_DEVEL],
|
||||
/usr/lib) ;;
|
||||
*) LDFLAGS="$LDFLAGS -L${ac_cv_ev_lib}" ;;
|
||||
esac
|
||||
CPPFLAGS="$CPPFLAGS -I${ac_cv_ev_inc} -I${MYSQL_C_INC_DIR}"
|
||||
CPPFLAGS="$CPPFLAGS -I${ac_cv_ev_inc}"
|
||||
AC_MSG_CHECKING([that we can build libev programs])
|
||||
AC_COMPILE_IFELSE(
|
||||
[AC_LANG_PROGRAM([#include <ev++.h>],
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
AC_DEFUN([TCMALLOC],
|
||||
[AC_ARG_WITH(
|
||||
[tcmalloc],
|
||||
[AS_HELP_STRING([--with-tcmalloc], [use tcmalloc if it's available])],
|
||||
[AS_HELP_STRING([--with-tcmalloc], [use tcmalloc if it's available [default=yes]])],
|
||||
[],
|
||||
[with_tcmalloc=no])
|
||||
[with_tcmalloc=yes])
|
||||
|
||||
AS_IF([test "x$with_tcmalloc" != "xno"],
|
||||
[AC_CHECK_LIB([tcmalloc], [tc_cfree], [],
|
||||
[AC_MSG_WARN([tcmalloc libraries not installed])]
|
||||
[AC_CHECK_LIB([tcmalloc], [tc_free], [],
|
||||
[AC_MSG_NOTICE([tcmalloc libraries not installed])]
|
||||
)])
|
||||
])
|
||||
|
||||
@@ -56,3 +56,26 @@ std::string hex_decode(const std::string &in) {
	}
	return out;
}

std::string bintohex(const std::string &in) {
	std::string out;
	out.reserve(40);
	size_t length = in.length();
	for (unsigned int i = 0; i < length; i++) {
		unsigned char x = (unsigned char)in[i] >> 4;
		if (x > 9) {
			x += 'a' - 10;
		} else {
			x += '0';
		}
		out.push_back(x);
		x = in[i] & 0xF;
		if (x > 9) {
			x += 'a' - 10;
		} else {
			x += '0';
		}
		out.push_back(x);
	}
	return out;
}
@@ -6,6 +6,7 @@ long strtolong(const std::string& str);
int64_t strtolonglong(const std::string& str);
std::string inttostr(int i);
std::string hex_decode(const std::string &in);
std::string bintohex(const std::string &in);
int timeval_subtract (timeval* result, timeval* x, timeval* y);

#endif
18
ocelot.cpp
18
ocelot.cpp
@@ -8,6 +8,7 @@
|
||||
|
||||
static connection_mother *mother;
|
||||
static worker *work;
|
||||
struct stats stats;
|
||||
|
||||
static void sig_handler(int sig)
|
||||
{
|
||||
@@ -32,7 +33,6 @@ int main(int argc, char **argv) {
|
||||
verbose = true;
|
||||
}
|
||||
}
|
||||
|
||||
mysql db(conf.mysql_db, conf.mysql_host, conf.mysql_username, conf.mysql_password);
|
||||
db.verbose_flush = verbose;
|
||||
|
||||
@@ -46,16 +46,28 @@ int main(int argc, char **argv) {
|
||||
std::cout << "Assuming no whitelist desired, disabling" << std::endl;
|
||||
}
|
||||
|
||||
std::unordered_map<std::string, user> users_list;
|
||||
user_list users_list;
|
||||
db.load_users(users_list);
|
||||
std::cout << "Loaded " << users_list.size() << " users" << std::endl;
|
||||
|
||||
std::unordered_map<std::string, torrent> torrents_list;
|
||||
torrent_list torrents_list;
|
||||
db.load_torrents(torrents_list);
|
||||
std::cout << "Loaded " << torrents_list.size() << " torrents" << std::endl;
|
||||
|
||||
db.load_tokens(torrents_list);
|
||||
|
||||
stats.open_connections = 0;
|
||||
stats.opened_connections = 0;
|
||||
stats.connection_rate = 0;
|
||||
stats.leechers = 0;
|
||||
stats.seeders = 0;
|
||||
stats.announcements = 0;
|
||||
stats.succ_announcements = 0;
|
||||
stats.scrapes = 0;
|
||||
stats.bytes_read = 0;
|
||||
stats.bytes_written = 0;
|
||||
stats.start_time = time(NULL);
|
||||
|
||||
// Create worker object, which handles announces and scrapes and all that jazz
|
||||
work = new worker(torrents_list, users_list, whitelist, &conf, &db, &sc);
|
||||
|
||||
|
||||
34
ocelot.h
34
ocelot.h
@@ -3,13 +3,16 @@
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
#include <set>
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#ifndef OCELOT_H
|
||||
#define OCELOT_H
|
||||
|
||||
class user;
|
||||
typedef std::shared_ptr<user> user_ptr;
|
||||
|
||||
typedef struct {
|
||||
int userid;
|
||||
unsigned int port;
|
||||
int64_t uploaded;
|
||||
int64_t downloaded;
|
||||
@@ -20,6 +23,7 @@ typedef struct {
|
||||
unsigned int announces;
|
||||
bool visible;
|
||||
bool invalid_ip;
|
||||
user_ptr user;
|
||||
std::string ip_port;
|
||||
std::string ip;
|
||||
} peer;
|
||||
@@ -30,7 +34,6 @@ enum freetype { NORMAL, FREE, NEUTRAL };
|
||||
|
||||
typedef struct {
|
||||
int id;
|
||||
time_t last_seeded;
|
||||
int64_t balance;
|
||||
int completed;
|
||||
freetype free_torrent;
|
||||
@@ -41,12 +44,6 @@ typedef struct {
|
||||
std::set<int> tokened_users;
|
||||
} torrent;
|
||||
|
||||
typedef struct {
|
||||
int id;
|
||||
bool can_leech;
|
||||
bool protect_ip;
|
||||
} user;
|
||||
|
||||
enum {
|
||||
DUPE, // 0
|
||||
TRUMP, // 1
|
||||
@@ -78,8 +75,23 @@ typedef struct {
|
||||
time_t time;
|
||||
} del_message;
|
||||
|
||||
|
||||
typedef std::unordered_map<std::string, torrent> torrent_list;
typedef std::unordered_map<std::string, user> user_list;
typedef std::unordered_map<std::string, user_ptr> user_list;
typedef std::unordered_map<std::string, std::string> params_type;

struct stats {
	std::mutex mutex;
	unsigned int open_connections;
	uint64_t opened_connections;
	uint64_t connection_rate;
	unsigned int leechers;
	unsigned int seeders;
	uint64_t announcements;
	uint64_t succ_announcements;
	uint64_t scrapes;
	uint64_t bytes_read;
	uint64_t bytes_written;
	time_t start_time;
};
extern struct stats stats;
#endif
54
report.cpp
Normal file
54
report.cpp
Normal file
@@ -0,0 +1,54 @@
#include <iostream>
#include <map>
#include <sstream>
#include "ocelot.h"
#include "misc_functions.h"
#include "report.h"
#include "response.h"
#include "user.h"

std::string report(params_type &params, user_list &users_list) {
	std::stringstream output;
	std::string action = params["get"];
	if (action == "") {
		output << "Invalid action\n";
	} else if (action == "stats") {
		time_t uptime = time(NULL) - stats.start_time;
		int up_d = uptime / 86400;
		uptime -= up_d * 86400;
		int up_h = uptime / 3600;
		uptime -= up_h * 3600;
		int up_m = uptime / 60;
		int up_s = uptime - up_m * 60;
		std::string up_ht = up_h <= 9 ? '0' + std::to_string(up_h) : std::to_string(up_h);
		std::string up_mt = up_m <= 9 ? '0' + std::to_string(up_m) : std::to_string(up_m);
		std::string up_st = up_s <= 9 ? '0' + std::to_string(up_s) : std::to_string(up_s);

		output << "Uptime: " << up_d << " days, " << up_ht << ':' << up_mt << ':' << up_st << '\n'
			<< stats.opened_connections << " connections opened\n"
			<< stats.open_connections << " open connections\n"
			<< stats.connection_rate << " connections/s\n"
			<< stats.succ_announcements << " successful announcements\n"
			<< (stats.announcements - stats.succ_announcements) << " failed announcements\n"
			<< stats.scrapes << " scrapes\n"
			<< stats.leechers << " leechers tracked\n"
			<< stats.seeders << " seeders tracked\n"
			<< stats.bytes_read << " bytes read\n"
			<< stats.bytes_written << " bytes written\n";
	} else if (action == "user") {
		std::string key = params["key"];
		if (key == "") {
			output << "Invalid action\n";
		} else {
			user_list::const_iterator u = users_list.find(key);
			if (u != users_list.end()) {
				output << u->second->get_leeching() << " leeching\n"
					<< u->second->get_seeding() << " seeding\n";
			}
		}
	} else {
		output << "Invalid action\n";
	}
	output << "success";
	return response(output.str(), false, false);
}
4
report.h
Normal file
4
report.h
Normal file
@@ -0,0 +1,4 @@
#include <string>
#include "ocelot.h"

std::string report(params_type &params, user_list &users_list);
49
response.cpp
Normal file
49
response.cpp
Normal file
@@ -0,0 +1,49 @@
#include "response.h"
#include "misc_functions.h"
#include <sstream>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/copy.hpp>
#include <boost/iostreams/filter/gzip.hpp>

std::string response(const std::string &body, bool gzip, bool html) {
	const std::string head = response_head(gzip, html);
	std::string out;
	bool processed = false;
	if (html) {
		out = "<html><head><meta name=\"robots\" content=\"noindex, nofollow\" /></head><body>" + body + "</body></html>";
		processed = true;
	}
	if (gzip) {
		std::stringstream ss, zss;
		ss << body;
		boost::iostreams::filtering_streambuf<boost::iostreams::input> in;
		in.push(boost::iostreams::gzip_compressor());
		in.push(ss);
		boost::iostreams::copy(in, zss);
		out = zss.str();
		processed = true;
	}
	if (processed) {
		return head + out;
	}
	return head + body;
}

std::string response_head(bool gzip, bool html) {
	const std::string content_type = html ? "text/html" : "text/plain";
	std::string head = "HTTP/1.1 200 OK\r\nServer: Ocelot 1.0";
	head += "\r\nContent-Type: " + content_type;
	if (gzip) {
		head += "\r\nContent-Encoding: gzip";
	}
	head += "\r\nConnection: close\r\n\r\n";
	return head;
}

std::string error(const std::string &err) {
	return response("d14:failure reason" + inttostr(err.length()) + ':' + err + "12:min intervali5400e8:intervali5400ee", false, false);
}

std::string warning(const std::string &msg) {
	return "15:warning message" + inttostr(msg.length()) + ':' + msg;
}
6
response.h
Normal file
6
response.h
Normal file
@@ -0,0 +1,6 @@
#include <string>

std::string response(const std::string &body, bool gzip, bool html);
std::string response_head(bool gzip, bool html);
std::string error(const std::string &err);
std::string warning(const std::string &msg);
14
schedule.cpp
14
schedule.cpp
@@ -14,19 +14,19 @@ schedule::schedule(connection_mother * mother_obj, worker* worker_obj, config* c
|
||||
}
|
||||
//---------- Schedule - gets called every schedule_interval seconds
|
||||
void schedule::handle(ev::timer &watcher, int events_flags) {
|
||||
|
||||
stats.connection_rate = (stats.opened_connections - last_opened_connections) / conf->schedule_interval;
|
||||
if (counter % 20 == 0) {
|
||||
std::cout << "Schedule run #" << counter << " - open: " << mother->get_open_connections() << ", opened: "
|
||||
<< mother->get_opened_connections() << ", speed: "
|
||||
<< ((mother->get_opened_connections()-last_opened_connections)/conf->schedule_interval) << "/s" << std::endl;
|
||||
std::cout << "Schedule run #" << counter << " - open: " << stats.open_connections << ", opened: "
|
||||
<< stats.opened_connections << ", speed: "
|
||||
<< stats.connection_rate << "/s" << std::endl;
|
||||
}
|
||||
|
||||
if ((work->get_status() == CLOSING) && db->all_clear() && sc->all_clear()) {
|
||||
if (work->get_status() == CLOSING && db->all_clear() && sc->all_clear()) {
|
||||
std::cout << "all clear, shutting down" << std::endl;
|
||||
exit(0);
|
||||
}
|
||||
|
||||
last_opened_connections = mother->get_opened_connections();
|
||||
last_opened_connections = stats.opened_connections;
|
||||
|
||||
db->flush();
|
||||
sc->flush_tokens();
|
||||
@@ -34,7 +34,7 @@ void schedule::handle(ev::timer &watcher, int events_flags) {
|
||||
time_t cur_time = time(NULL);
|
||||
|
||||
if (cur_time > next_reap_peers) {
|
||||
work->reap_peers();
|
||||
work->start_reaper();
|
||||
next_reap_peers = cur_time + conf->reap_peers_interval;
|
||||
}
|
||||
|
||||
|
||||
@@ -4,10 +4,9 @@
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <queue>
|
||||
#include <mutex>
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/thread/locks.hpp>
|
||||
#include <thread>
|
||||
|
||||
#include "config.h"
|
||||
#include "site_comm.h"
|
||||
@@ -34,7 +33,7 @@ void site_comm::expire_token(int torrent, int user)
|
||||
expire_token_buffer += token_pair.str();
|
||||
if (expire_token_buffer.length() > 350) {
|
||||
std::cout << "Flushing overloaded token buffer" << std::endl;
|
||||
boost::mutex::scoped_lock lock(expire_queue_lock);
|
||||
std::unique_lock<std::mutex> lock(expire_queue_lock);
|
||||
token_queue.push(expire_token_buffer);
|
||||
expire_token_buffer.clear();
|
||||
}
|
||||
@@ -42,7 +41,7 @@ void site_comm::expire_token(int torrent, int user)
|
||||
|
||||
void site_comm::flush_tokens()
|
||||
{
|
||||
boost::mutex::scoped_lock lock(expire_queue_lock);
|
||||
std::unique_lock<std::mutex> lock(expire_queue_lock);
|
||||
size_t qsize = token_queue.size();
|
||||
if (verbose_flush || qsize > 0) {
|
||||
std::cout << "Token expire queue size: " << qsize << std::endl;
|
||||
@@ -53,7 +52,8 @@ void site_comm::flush_tokens()
|
||||
token_queue.push(expire_token_buffer);
|
||||
expire_token_buffer.clear();
|
||||
if (t_active == false) {
|
||||
boost::thread thread(&site_comm::do_flush_tokens, this);
|
||||
std::thread thread(&site_comm::do_flush_tokens, this);
|
||||
thread.detach();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ void site_comm::do_flush_tokens()
|
||||
}
|
||||
|
||||
if (status_code == 200) {
|
||||
boost::mutex::scoped_lock lock(expire_queue_lock);
|
||||
std::unique_lock<std::mutex> lock(expire_queue_lock);
|
||||
token_queue.pop();
|
||||
} else {
|
||||
std::cout << "Response returned with status code " << status_code << " when trying to expire a token!" << std::endl;;
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
#include <string>
|
||||
#include <boost/asio.hpp>
|
||||
#include <queue>
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <mutex>
|
||||
|
||||
#include "config.h"
|
||||
|
||||
@@ -15,7 +15,7 @@ using boost::asio::ip::tcp;
|
||||
class site_comm {
|
||||
private:
|
||||
config conf;
|
||||
boost::mutex expire_queue_lock;
|
||||
std::mutex expire_queue_lock;
|
||||
std::string expire_token_buffer;
|
||||
std::queue<std::string> token_queue;
|
||||
bool t_active;
|
||||
|
||||
51
user.cpp
Normal file
51
user.cpp
Normal file
@@ -0,0 +1,51 @@
#include "user.h"

user::user(int uid, bool leech, bool protect) : id(uid), leechstatus(leech), protect_ip(protect) {
	stats.leeching = 0;
	stats.seeding = 0;
}

int user::get_id() {
	return id;
}

bool user::is_protected() {
	return protect_ip;
}

void user::set_protected(bool status) {
	protect_ip = status;
}

bool user::can_leech() {
	return leechstatus;
}

void user::set_leechstatus(bool status) {
	leechstatus = status;
}

// Stats methods
unsigned int user::get_leeching() {
	return stats.leeching;
}

unsigned int user::get_seeding() {
	return stats.seeding;
}

void user::decr_leeching() {
	stats.leeching--;
}

void user::decr_seeding() {
	stats.seeding--;
}

void user::incr_leeching() {
	stats.leeching++;
}

void user::incr_seeding() {
	stats.seeding++;
}
23
user.h
Normal file
23
user.h
Normal file
@@ -0,0 +1,23 @@
class user {
	private:
		int id;
		bool leechstatus;
		bool protect_ip;
		struct {
			unsigned int leeching;
			unsigned int seeding;
		} stats;
	public:
		user(int uid, bool leech, bool protect);
		int get_id();
		bool is_protected();
		void set_protected(bool status);
		bool can_leech();
		void set_leechstatus(bool status);
		void decr_leeching();
		void decr_seeding();
		void incr_leeching();
		void incr_seeding();
		unsigned int get_leeching();
		unsigned int get_seeding();
};
541
worker.cpp
541
worker.cpp
@@ -9,6 +9,8 @@
|
||||
#include <vector>
|
||||
#include <set>
|
||||
#include <algorithm>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include <netinet/in.h>
|
||||
#include <arpa/inet.h>
|
||||
@@ -19,14 +21,11 @@
|
||||
#include "worker.h"
|
||||
#include "misc_functions.h"
|
||||
#include "site_comm.h"
|
||||
#include "response.h"
|
||||
#include "report.h"
|
||||
#include "user.h"
|
||||
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#include <boost/thread/thread.hpp>
|
||||
#include <boost/thread/locks.hpp>
|
||||
#include <boost/bind.hpp>
|
||||
#include <boost/iostreams/filtering_streambuf.hpp>
|
||||
#include <boost/iostreams/copy.hpp>
|
||||
#include <boost/iostreams/filter/gzip.hpp>
|
||||
|
||||
//---------- Worker - does stuff with input
|
||||
worker::worker(torrent_list &torrents, user_list &users, std::vector<std::string> &_whitelist, config * conf_obj, mysql * db_obj, site_comm * sc) : torrents_list(torrents), users_list(users), whitelist(_whitelist), conf(conf_obj), db(db_obj), s_comm(sc)
|
||||
@@ -45,7 +44,7 @@ bool worker::signal(int sig) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
std::string worker::work(std::string &input, std::string &ip) {
|
||||
unsigned int input_length = input.length();
|
||||
|
||||
//---------- Parse request - ugly but fast. Using substr exploded.
|
||||
@@ -59,11 +58,6 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
std::string passkey;
|
||||
passkey.reserve(32);
|
||||
if (input[37] != '/') {
|
||||
/* This didn't work as intended. We want the crawler to download the meta tag
|
||||
if (input.substr(5, 10) == "robots.txt") {
|
||||
// Let's just hope that no crawler has a / at pos 37
|
||||
return "User-agent: *\nDisallow: /";
|
||||
}*/
|
||||
return error("Malformed announce");
|
||||
}
|
||||
|
||||
@@ -75,16 +69,19 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
|
||||
// Get the action
|
||||
enum action_t {
|
||||
INVALID = 0, ANNOUNCE, SCRAPE, UPDATE
|
||||
INVALID = 0, ANNOUNCE, SCRAPE, UPDATE, REPORT
|
||||
};
|
||||
action_t action = INVALID;
|
||||
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
switch (input[pos]) {
|
||||
case 'a':
|
||||
stats.announcements++;
|
||||
action = ANNOUNCE;
|
||||
pos += 8;
|
||||
break;
|
||||
case 's':
|
||||
stats.scrapes++;
|
||||
action = SCRAPE;
|
||||
pos += 6;
|
||||
break;
|
||||
@@ -92,24 +89,30 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
action = UPDATE;
|
||||
pos += 6;
|
||||
break;
|
||||
case 'r':
|
||||
action = REPORT;
|
||||
pos += 6;
|
||||
break;
|
||||
}
|
||||
if (action == INVALID) {
|
||||
return error("invalid action");
|
||||
}
|
||||
lock.unlock();
|
||||
|
||||
if (input[pos] != '?') {
|
||||
// No parameters given. Probably means we're not talking to a torrent client
|
||||
return "<html><head><meta name=\"robots\" content=\"noindex, nofollow\" /></head><body>Nothing to see here</body></html>";
|
||||
return response("Nothing to see here", false, true);
|
||||
}
|
||||
|
||||
if ((status != OPEN) && (action != UPDATE)) {
|
||||
if (status != OPEN && action != UPDATE) {
|
||||
return error("The tracker is temporarily unavailable.");
|
||||
}
|
||||
|
||||
if (action == INVALID) {
|
||||
return error("Invalid action");
|
||||
}
|
||||
|
||||
// Parse URL params
|
||||
std::list<std::string> infohashes; // For scrape only
|
||||
|
||||
std::map<std::string, std::string> params;
|
||||
params_type params;
|
||||
std::string key;
|
||||
std::string value;
|
||||
bool parsing_key = true; // true = key, false = value
|
||||
@@ -142,7 +145,7 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
pos += 10; // skip 'HTTP/1.1' - should probably be +=11, but just in case a client doesn't send \r
|
||||
|
||||
// Parse headers
|
||||
std::map<std::string, std::string> headers;
|
||||
params_type headers;
|
||||
parsing_key = true;
|
||||
bool found_data = false;
|
||||
|
||||
@@ -170,8 +173,6 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
if (action == UPDATE) {
|
||||
if (passkey == conf->site_password) {
|
||||
return update(params);
|
||||
@@ -180,6 +181,14 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
}
|
||||
}
|
||||
|
||||
if (action == REPORT) {
|
||||
if (passkey == conf->report_password) {
|
||||
return report(params, users_list);
|
||||
} else {
|
||||
return error("Authentication failure");
|
||||
}
|
||||
}
|
||||
|
||||
// Either a scrape or an announce
|
||||
|
||||
user_list::iterator u = users_list.find(passkey);
|
||||
@@ -188,13 +197,13 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
}
|
||||
|
||||
if (action == ANNOUNCE) {
|
||||
boost::mutex::scoped_lock lock(db->torrent_list_mutex);
|
||||
std::unique_lock<std::mutex> tl_lock(db->torrent_list_mutex);
|
||||
// Let's translate the infohash into something nice
|
||||
// info_hash is a url encoded (hex) base 20 number
|
||||
std::string info_hash_decoded = hex_decode(params["info_hash"]);
|
||||
torrent_list::iterator tor = torrents_list.find(info_hash_decoded);
|
||||
if (tor == torrents_list.end()) {
|
||||
boost::mutex::scoped_lock lock(del_reasons_lock);
|
||||
std::unique_lock<std::mutex> dr_lock(del_reasons_lock);
|
||||
auto msg = del_reasons.find(info_hash_decoded);
|
||||
if (msg != del_reasons.end()) {
|
||||
if (msg->second.reason != -1) {
|
||||
@@ -206,42 +215,26 @@ std::string worker::work(std::string &input, std::string &ip, bool &gzip) {
|
||||
return error("Unregistered torrent");
|
||||
}
|
||||
}
|
||||
return announce(tor->second, u->second, params, headers, ip, gzip);
|
||||
return announce(tor->second, u->second, params, headers, ip);
|
||||
} else {
|
||||
return scrape(infohashes, headers, gzip);
|
||||
return scrape(infohashes, headers);
|
||||
}
|
||||
}
|
||||
|
||||
std::string worker::error(std::string err) {
|
||||
std::string output = "d14:failure reason";
|
||||
output += inttostr(err.length());
|
||||
output += ':';
|
||||
output += err;
|
||||
output += 'e';
|
||||
return output;
|
||||
}
|
||||
|
||||
std::string worker::warning(std::string msg) {
|
||||
std::string output = "15:warning message";
|
||||
output += inttostr(msg.length());
|
||||
output += ':';
|
||||
output += msg;
|
||||
return output;
|
||||
}
|
||||
|
||||
std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::string> ¶ms, std::map<std::string, std::string> &headers, std::string &ip, bool &gzip) {
|
||||
std::string worker::announce(torrent &tor, user_ptr &u, params_type ¶ms, params_type &headers, std::string &ip) {
|
||||
cur_time = time(NULL);
|
||||
|
||||
if (params["compact"] != "1") {
|
||||
return error("Your client does not support compact announces");
|
||||
}
|
||||
bool gzip = false;
|
||||
|
||||
int64_t left = strtolonglong(params["left"]);
|
||||
int64_t left = std::max((int64_t)0, strtolonglong(params["left"]));
|
||||
int64_t uploaded = std::max((int64_t)0, strtolonglong(params["uploaded"]));
|
||||
int64_t downloaded = std::max((int64_t)0, strtolonglong(params["downloaded"]));
|
||||
int64_t corrupt = strtolonglong(params["corrupt"]);
|
||||
int64_t corrupt = std::max((int64_t)0, strtolonglong(params["corrupt"]));
|
||||
|
||||
int snatches = 0; // This is the value that gets sent to the database on a snatch
|
||||
int snatched = 0; // This is the value that gets sent to the database on a snatch
|
||||
int active = 1; // This is the value that marks a peer as active/inactive in the database
|
||||
bool inserted = false; // If we insert the peer as opposed to update
|
||||
bool update_torrent = false; // Whether or not we should update the torrent in the DB
|
||||
@@ -249,8 +242,10 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
bool stopped_torrent = false; // Was the torrent just stopped?
|
||||
bool expire_token = false; // Whether or not to expire a token after torrent completion
|
||||
bool peer_changed = false; // Whether or not the peer is new or has changed since the last announcement
|
||||
bool invalid_ip = false;
|
||||
bool inc_l = false, inc_s = false, dec_l = false, dec_s = false;
|
||||
|
||||
std::map<std::string, std::string>::const_iterator peer_id_iterator = params.find("peer_id");
|
||||
params_type::const_iterator peer_id_iterator = params.find("peer_id");
|
||||
if (peer_id_iterator == params.end()) {
|
||||
return error("No peer ID");
|
||||
}
|
||||
@@ -265,84 +260,77 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
return error("Your client is not on the whitelist");
|
||||
}
|
||||
}
|
||||
|
||||
if (params["event"] == "completed") {
|
||||
// Don't update <snatches> here as we may decide to use other conditions later on
|
||||
completed_torrent = true;
|
||||
// Don't update <snatched> here as we may decide to use other conditions later on
|
||||
completed_torrent = (left == 0); // Sanity check just to be extra safe
|
||||
} else if (params["event"] == "stopped") {
|
||||
stopped_torrent = true;
|
||||
peer_changed = true;
|
||||
update_torrent = true;
|
||||
active = 0;
|
||||
}
|
||||
|
||||
int userid = u->get_id();
|
||||
peer * p;
|
||||
peer_list::iterator i;
|
||||
peer_list::iterator peer_it;
|
||||
// Insert/find the peer in the torrent list
|
||||
if (left > 0) {
|
||||
i = tor.leechers.find(peer_id);
|
||||
if (i == tor.leechers.end()) {
|
||||
peer new_peer;
|
||||
std::pair<peer_list::iterator, bool> insert
|
||||
= tor.leechers.insert(std::pair<std::string, peer>(peer_id, new_peer));
|
||||
|
||||
p = &(insert.first->second);
|
||||
peer_it = tor.leechers.find(peer_id);
|
||||
if (peer_it == tor.leechers.end()) {
|
||||
// We could search the seed list as well, but the peer reaper will sort things out eventually
|
||||
peer_it = add_peer(tor.leechers, peer_id);
|
||||
inserted = true;
|
||||
} else {
|
||||
p = &i->second;
|
||||
inc_l = true;
|
||||
}
|
||||
} else if (completed_torrent) {
|
||||
i = tor.leechers.find(peer_id);
|
||||
if (i == tor.leechers.end()) {
|
||||
peer new_peer;
|
||||
std::pair<peer_list::iterator, bool> insert
|
||||
= tor.seeders.insert(std::pair<std::string, peer>(peer_id, new_peer));
|
||||
|
||||
p = &(insert.first->second);
|
||||
inserted = true;
|
||||
} else {
|
||||
p = &i->second;
|
||||
peer_it = tor.leechers.find(peer_id);
|
||||
if (peer_it == tor.leechers.end()) {
|
||||
peer_it = tor.seeders.find(peer_id);
|
||||
if (peer_it == tor.seeders.end()) {
|
||||
peer_it = add_peer(tor.seeders, peer_id);
|
||||
inserted = true;
|
||||
inc_s = true;
|
||||
} else {
|
||||
completed_torrent = false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
i = tor.seeders.find(peer_id);
|
||||
if (i == tor.seeders.end()) {
|
||||
i = tor.leechers.find(peer_id);
|
||||
if (i == tor.leechers.end()) {
|
||||
peer new_peer;
|
||||
std::pair<peer_list::iterator, bool> insert
|
||||
= tor.seeders.insert(std::pair<std::string, peer>(peer_id, new_peer));
|
||||
|
||||
p = &(insert.first->second);
|
||||
peer_it = tor.seeders.find(peer_id);
|
||||
if (peer_it == tor.seeders.end()) {
|
||||
peer_it = tor.leechers.find(peer_id);
|
||||
if (peer_it == tor.leechers.end()) {
|
||||
peer_it = add_peer(tor.seeders, peer_id);
|
||||
inserted = true;
|
||||
} else {
|
||||
p = &i->second;
|
||||
p = &peer_it->second;
|
||||
std::pair<peer_list::iterator, bool> insert
|
||||
= tor.seeders.insert(std::pair<std::string, peer>(peer_id, *p));
|
||||
tor.leechers.erase(peer_id);
|
||||
if (downloaded > 0) {
|
||||
std::cout << "Found unreported snatch from user " << u.id << " on torrent " << tor.id << std::endl;
|
||||
}
|
||||
p = &(insert.first->second);
|
||||
tor.leechers.erase(peer_it);
|
||||
/*if (downloaded > 0) {
|
||||
std::cout << "Found unreported snatch from user " << userid << " on torrent " << tor.id << std::endl;
|
||||
}*/
|
||||
peer_it = insert.first;
|
||||
peer_changed = true;
|
||||
// completed_torrent = true; // Not sure if we want to do this. Will cause massive spam for weird clients (e.g. µTorrent 3 and Deluge)
|
||||
dec_l = true;
|
||||
}
|
||||
} else {
|
||||
p = &i->second;
|
||||
inc_s = true;
|
||||
}
|
||||
|
||||
tor.last_seeded = cur_time;
|
||||
}
|
||||
p = &peer_it->second;
|
||||
|
||||
int64_t upspeed = 0;
|
||||
int64_t downspeed = 0;
|
||||
if (inserted || params["event"] == "started") {
|
||||
//New peer on this torrent
|
||||
// New peer on this torrent (maybe)
|
||||
update_torrent = true;
|
||||
p->userid = u.id;
|
||||
if (inserted) {
|
||||
// If this was an existing peer, the user pointer will be corrected later
|
||||
p->user = u;
|
||||
}
|
||||
p->first_announced = cur_time;
|
||||
p->last_announced = 0;
|
||||
p->uploaded = uploaded;
|
||||
@@ -386,7 +374,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
upspeed = uploaded_change / (cur_time - p->last_announced);
|
||||
downspeed = downloaded_change / (cur_time - p->last_announced);
|
||||
}
|
||||
std::set<int>::iterator sit = tor.tokened_users.find(u.id);
|
||||
std::set<int>::iterator sit = tor.tokened_users.find(userid);
|
||||
if (tor.free_torrent == NEUTRAL) {
|
||||
downloaded_change = 0;
|
||||
uploaded_change = 0;
|
||||
@@ -394,7 +382,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
if (sit != tor.tokened_users.end()) {
|
||||
expire_token = true;
|
||||
std::stringstream record;
|
||||
record << '(' << u.id << ',' << tor.id << ',' << downloaded_change << ')';
|
||||
record << '(' << userid << ',' << tor.id << ',' << downloaded_change << ')';
|
||||
std::string record_str = record.str();
|
||||
db->record_token(record_str);
|
||||
}
|
||||
@@ -403,7 +391,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
|
||||
if (uploaded_change || downloaded_change) {
|
||||
std::stringstream record;
|
||||
record << '(' << u.id << ',' << uploaded_change << ',' << downloaded_change << ')';
|
||||
record << '(' << userid << ',' << uploaded_change << ',' << downloaded_change << ')';
|
||||
std::string record_str = record.str();
|
||||
db->record_user(record_str);
|
||||
}
|
||||
@@ -411,7 +399,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
}
|
||||
p->left = left;
|
||||
|
||||
std::map<std::string, std::string>::const_iterator param_ip = params.find("ip");
|
||||
params_type::const_iterator param_ip = params.find("ip");
|
||||
if (param_ip != params.end()) {
|
||||
ip = param_ip->second;
|
||||
} else {
|
||||
@@ -428,7 +416,6 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
p->ip = ip;
|
||||
p->ip_port = "";
|
||||
char x = 0;
|
||||
bool invalid_ip = false;
|
||||
for (size_t pos = 0, end = ip.length(); pos < end; pos++) {
|
||||
if (ip[pos] == '.') {
|
||||
p->ip_port.push_back(x);
|
||||
@@ -450,19 +437,21 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
invalid_ip = true;
|
||||
}
|
||||
p->invalid_ip = invalid_ip;
|
||||
} else {
|
||||
invalid_ip = p->invalid_ip;
|
||||
}
|
||||
|
||||
// Update the peer
|
||||
p->last_announced = cur_time;
|
||||
p->visible = peer_is_visible(&u, p);
|
||||
p->visible = peer_is_visible(u, p);
|
||||
|
||||
// Add peer data to the database
|
||||
std::stringstream record;
|
||||
if (peer_changed) {
|
||||
record << '(' << u.id << ',' << tor.id << ',' << active << ',' << uploaded << ',' << downloaded << ',' << upspeed << ',' << downspeed << ',' << left << ',' << corrupt << ',' << (cur_time - p->first_announced) << ',' << p->announces << ',';
|
||||
record << '(' << userid << ',' << tor.id << ',' << active << ',' << uploaded << ',' << downloaded << ',' << upspeed << ',' << downspeed << ',' << left << ',' << corrupt << ',' << (cur_time - p->first_announced) << ',' << p->announces << ',';
|
||||
std::string record_str = record.str();
|
||||
std::string record_ip;
|
||||
if (u.protect_ip) {
|
||||
if (u->is_protected()) {
|
||||
record_ip = "";
|
||||
} else {
|
||||
record_ip = ip;
|
||||
@@ -476,7 +465,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
|
||||
// Select peers!
|
||||
unsigned int numwant;
|
||||
std::map<std::string, std::string>::const_iterator param_numwant = params.find("numwant");
|
||||
params_type::const_iterator param_numwant = params.find("numwant");
|
||||
if (param_numwant == params.end()) {
|
||||
numwant = 50;
|
||||
} else {
|
||||
@@ -486,41 +475,40 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
if (stopped_torrent) {
|
||||
numwant = 0;
|
||||
if (left > 0) {
|
||||
if (tor.leechers.erase(peer_id) == 0) {
|
||||
std::cout << "Tried and failed to remove seeder from torrent " << tor.id << std::endl;
|
||||
}
|
||||
dec_l = true;
|
||||
} else {
|
||||
if (tor.seeders.erase(peer_id) == 0) {
|
||||
std::cout << "Tried and failed to remove leecher from torrent " << tor.id << std::endl;
|
||||
}
|
||||
dec_s = true;
|
||||
}
|
||||
} else if (completed_torrent) {
|
||||
snatches = 1;
|
||||
snatched = 1;
|
||||
update_torrent = true;
|
||||
tor.completed++;
|
||||
|
||||
std::stringstream record;
|
||||
std::string record_ip;
|
||||
if (u.protect_ip) {
|
||||
if (u->is_protected()) {
|
||||
record_ip = "";
|
||||
} else {
|
||||
record_ip = ip;
|
||||
}
|
||||
record << '(' << u.id << ',' << tor.id << ',' << cur_time << ", '" << record_ip << "')";
|
||||
record << '(' << userid << ',' << tor.id << ',' << cur_time;
|
||||
std::string record_str = record.str();
|
||||
db->record_snatch(record_str);
|
||||
db->record_snatch(record_str, record_ip);
|
||||
|
||||
// User is a seeder now!
|
||||
if (!inserted) {
|
||||
tor.seeders.insert(std::pair<std::string, peer>(peer_id, *p));
|
||||
tor.leechers.erase(peer_id);
|
||||
std::pair<peer_list::iterator, bool> insert
|
||||
= tor.seeders.insert(std::pair<std::string, peer>(peer_id, *p));
|
||||
tor.leechers.erase(peer_it);
|
||||
peer_it = insert.first;
|
||||
p = &peer_it->second;
|
||||
dec_l = inc_s = true;
|
||||
}
|
||||
if (expire_token) {
|
||||
s_comm->expire_token(tor.id, u.id);
|
||||
tor.tokened_users.erase(u.id);
|
||||
s_comm->expire_token(tor.id, userid);
|
||||
tor.tokened_users.erase(userid);
|
||||
}
|
||||
// do cache expire
|
||||
} else if (!u.can_leech && left > 0) {
|
||||
} else if (!u->can_leech() && left > 0) {
|
||||
numwant = 0;
|
||||
}
|
||||
|
||||
@@ -556,12 +544,12 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
}
|
||||
|
||||
// Add seeders
|
||||
while(i != end && found_peers < numwant) {
|
||||
while (i != end && found_peers < numwant) {
|
||||
if (i == tor.seeders.end()) {
|
||||
i = tor.seeders.begin();
|
||||
}
|
||||
// Don't show users themselves
|
||||
if (i->second.userid == p->userid || !i->second.visible) {
|
||||
if (i->second.user->get_id() == userid || !i->second.visible) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
@@ -575,7 +563,7 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
if (found_peers < numwant && tor.leechers.size() > 1) {
|
||||
for (peer_list::const_iterator i = tor.leechers.begin(); i != tor.leechers.end() && found_peers < numwant; i++) {
|
||||
// Don't show users themselves or leech disabled users
|
||||
if (i->second.ip_port == p->ip_port || i->second.userid == p->userid || !i->second.visible) {
|
||||
if (i->second.ip_port == p->ip_port || i->second.user->get_id() == userid || !i->second.visible) {
|
||||
continue;
|
||||
}
|
||||
found_peers++;
|
||||
@@ -586,8 +574,8 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
} else if (tor.leechers.size() > 0) { // User is a seeder, and we have leechers!
|
||||
for (peer_list::const_iterator i = tor.leechers.begin(); i != tor.leechers.end() && found_peers < numwant; i++) {
|
||||
// Don't show users themselves or leech disabled users
|
||||
if (i->second.userid == p->userid || !i->second.visible) {
|
||||
continue;
|
||||
if (i->second.user->get_id() == userid || !i->second.visible) {
|
||||
continue;
|
||||
}
|
||||
found_peers++;
|
||||
peers.append(i->second.ip_port);
|
||||
@@ -595,61 +583,104 @@ std::string worker::announce(torrent &tor, user &u, std::map<std::string, std::s
|
||||
}
|
||||
}
|
||||
|
||||
// Update the stats
|
||||
std::unique_lock<std::mutex> lock(stats.mutex);
|
||||
stats.succ_announcements++;
|
||||
if (dec_l || dec_s || inc_l || inc_s) {
|
||||
std::unique_lock<std::mutex> us_lock(ustats_lock);
|
||||
if (inc_l) {
|
||||
p->user->incr_leeching();
|
||||
stats.leechers++;
|
||||
}
|
||||
if (inc_s) {
|
||||
p->user->incr_seeding();
|
||||
stats.seeders++;
|
||||
}
|
||||
if (dec_l) {
|
||||
p->user->decr_leeching();
|
||||
stats.leechers--;
|
||||
}
|
||||
if (dec_s) {
|
||||
p->user->decr_seeding();
|
||||
stats.seeders--;
|
||||
}
|
||||
}
|
||||
lock.unlock();
|
||||
|
||||
// Correct the stats for the old user if the peer's user link has changed
|
||||
if (p->user != u) {
|
||||
if (!stopped_torrent) {
|
||||
std::unique_lock<std::mutex> us_lock(ustats_lock);
|
||||
if (left > 0) {
|
||||
u->incr_leeching();
|
||||
p->user->decr_leeching();
|
||||
} else {
|
||||
u->incr_seeding();
|
||||
p->user->decr_seeding();
|
||||
}
|
||||
}
|
||||
p->user = u;
|
||||
}
|
||||
|
||||
// Delete peers as late as possible to prevent access problems
|
||||
if (stopped_torrent) {
|
||||
if (left > 0) {
|
||||
tor.leechers.erase(peer_it);
|
||||
} else {
|
||||
tor.seeders.erase(peer_it);
|
||||
}
|
||||
}
|
||||
|
||||
// Putting this after the peer deletion gives us accurate swarm sizes
|
||||
if (update_torrent || tor.last_flushed + 3600 < cur_time) {
|
||||
tor.last_flushed = cur_time;
|
||||
|
||||
std::stringstream record;
|
||||
record << '(' << tor.id << ',' << tor.seeders.size() << ',' << tor.leechers.size() << ',' << snatches << ',' << tor.balance << ')';
|
||||
record << '(' << tor.id << ',' << tor.seeders.size() << ',' << tor.leechers.size() << ',' << snatched << ',' << tor.balance << ')';
|
||||
std::string record_str = record.str();
|
||||
db->record_torrent(record_str);
|
||||
}
|
||||
|
||||
if (!u.can_leech && left > 0) {
|
||||
|
||||
if (!u->can_leech() && left > 0) {
|
||||
return error("Access denied, leeching forbidden");
|
||||
}
|
||||
|
||||
std::string response = "d8:completei";
|
||||
response.reserve(350);
|
||||
response += inttostr(tor.seeders.size());
|
||||
response += "e10:downloadedi";
|
||||
response += inttostr(tor.completed);
|
||||
response += "e10:incompletei";
|
||||
response += inttostr(tor.leechers.size());
|
||||
response += "e8:intervali";
|
||||
response += inttostr(conf->announce_interval+std::min((size_t)600, tor.seeders.size())); // ensure a more even distribution of announces/second
|
||||
response += "e12:min intervali";
|
||||
response += inttostr(conf->announce_interval);
|
||||
response += "e5:peers";
|
||||
std::string output = "d8:completei";
|
||||
output.reserve(350);
|
||||
output += inttostr(tor.seeders.size());
|
||||
output += "e10:downloadedi";
|
||||
output += inttostr(tor.completed);
|
||||
output += "e10:incompletei";
|
||||
output += inttostr(tor.leechers.size());
|
||||
output += "e8:intervali";
|
||||
output += inttostr(conf->announce_interval+std::min((size_t)600, tor.seeders.size())); // ensure a more even distribution of announces/second
|
||||
output += "e12:min intervali";
|
||||
output += inttostr(conf->announce_interval);
|
||||
output += "e5:peers";
|
||||
if (peers.length() == 0) {
|
||||
response += "0:";
|
||||
output += "0:";
|
||||
} else {
|
||||
response += inttostr(peers.length());
|
||||
response += ":";
|
||||
response += peers;
|
||||
output += inttostr(peers.length());
|
||||
output += ":";
|
||||
output += peers;
|
||||
}
|
||||
if (p->invalid_ip) {
|
||||
response += warning("Illegal character found in IP address. IPv6 is not supported");
|
||||
if (invalid_ip) {
|
||||
output += warning("Illegal character found in IP address. IPv6 is not supported");
|
||||
}
|
||||
response += "e";
|
||||
output += 'e';
|
||||
|
||||
/* gzip compression actually makes announce returns larger from our
|
||||
* testing. Feel free to enable this here if you'd like but be aware of
|
||||
* possibly inflated return size
|
||||
if (headers["accept-encoding"] == "gzip") {
|
||||
std::stringstream ss, zss;
|
||||
ss << response;
|
||||
boost::iostreams::filtering_streambuf<boost::iostreams::input> in;
|
||||
in.push(boost::iostreams::gzip_compressor());
|
||||
in.push(ss);
|
||||
boost::iostreams::copy(in, zss);
|
||||
response = zss.str();
|
||||
*/
|
||||
/*if (headers["accept-encoding"].find("gzip") != std::string::npos) {
|
||||
gzip = true;
|
||||
}*/
|
||||
|
||||
return response;
|
||||
return response(output, gzip, false);
|
||||
}
|
||||
|
||||
std::string worker::scrape(const std::list<std::string> &infohashes, std::map<std::string, std::string> &headers, bool &gzip) {
|
||||
std::string worker::scrape(const std::list<std::string> &infohashes, params_type &headers) {
|
||||
bool gzip = false;
|
||||
std::string output = "d5:filesd";
|
||||
for (std::list<std::string>::const_iterator i = infohashes.begin(); i != infohashes.end(); i++) {
|
||||
std::string infohash = *i;
|
||||
@@ -672,52 +703,48 @@ std::string worker::scrape(const std::list<std::string> &infohashes, std::map<st
|
||||
output += inttostr(t->completed);
|
||||
output += "ee";
|
||||
}
|
||||
output+="ee";
|
||||
|
||||
output += "ee";
|
||||
if (headers["accept-encoding"].find("gzip") != std::string::npos) {
|
||||
std::stringstream ss, zss;
|
||||
ss << output;
|
||||
boost::iostreams::filtering_streambuf<boost::iostreams::input> in;
|
||||
in.push(boost::iostreams::gzip_compressor());
|
||||
in.push(ss);
|
||||
boost::iostreams::copy(in, zss);
|
||||
output = zss.str();
|
||||
gzip = true;
|
||||
}
|
||||
|
||||
return output;
|
||||
return response(output, gzip, false);
|
||||
}
|
||||
|
||||
//TODO: Restrict to local IPs
|
||||
std::string worker::update(std::map<std::string, std::string> ¶ms) {
|
||||
std::string worker::update(params_type ¶ms) {
|
||||
if (params["action"] == "change_passkey") {
|
||||
std::string oldpasskey = params["oldpasskey"];
|
||||
std::string newpasskey = params["newpasskey"];
|
||||
user_list::iterator i = users_list.find(oldpasskey);
|
||||
if (i == users_list.end()) {
|
||||
auto u = users_list.find(oldpasskey);
|
||||
if (u == users_list.end()) {
|
||||
std::cout << "No user with passkey " << oldpasskey << " exists when attempting to change passkey to " << newpasskey << std::endl;
|
||||
} else {
|
||||
users_list[newpasskey] = i->second;;
|
||||
users_list[newpasskey] = u->second;
|
||||
users_list.erase(oldpasskey);
|
||||
std::cout << "Changed passkey from " << oldpasskey << " to " << newpasskey << " for user " << i->second.id << std::endl;
|
||||
std::cout << "Changed passkey from " << oldpasskey << " to " << newpasskey << " for user " << u->second->get_id() << std::endl;
|
||||
}
|
||||
} else if (params["action"] == "add_torrent") {
|
||||
torrent t;
|
||||
t.id = strtolong(params["id"]);
|
||||
torrent *t;
|
||||
std::string info_hash = params["info_hash"];
|
||||
info_hash = hex_decode(info_hash);
|
||||
if (params["freetorrent"] == "0") {
|
||||
t.free_torrent = NORMAL;
|
||||
} else if (params["freetorrent"] == "1") {
|
||||
t.free_torrent = FREE;
|
||||
auto i = torrents_list.find(info_hash);
|
||||
if (i == torrents_list.end()) {
|
||||
t = &torrents_list[info_hash];
|
||||
t->id = strtolong(params["id"]);
|
||||
t->balance = 0;
|
||||
t->completed = 0;
|
||||
t->last_selected_seeder = "";
|
||||
} else {
|
||||
t.free_torrent = NEUTRAL;
|
||||
t = &i->second;
|
||||
}
|
||||
t.balance = 0;
|
||||
t.completed = 0;
|
||||
t.last_selected_seeder = "";
|
||||
torrents_list[info_hash] = t;
|
||||
std::cout << "Added torrent " << t.id<< ". FL: " << t.free_torrent << " " << params["freetorrent"] << std::endl;
|
||||
if (params["freetorrent"] == "0") {
|
||||
t->free_torrent = NORMAL;
|
||||
} else if (params["freetorrent"] == "1") {
|
||||
t->free_torrent = FREE;
|
||||
} else {
|
||||
t->free_torrent = NEUTRAL;
|
||||
}
|
||||
std::cout << "Added torrent " << t->id << ". FL: " << t->free_torrent << " " << params["freetorrent"] << std::endl;
|
||||
} else if (params["action"] == "update_torrent") {
|
||||
std::string info_hash = params["info_hash"];
|
||||
info_hash = hex_decode(info_hash);
@@ -760,21 +787,21 @@ std::string worker::update(std::map<std::string, std::string> &params) {
}
} else if (params["action"] == "add_token") {
std::string info_hash = hex_decode(params["info_hash"]);
int user_id = atoi(params["userid"].c_str());
int userid = atoi(params["userid"].c_str());
auto torrent_it = torrents_list.find(info_hash);
if (torrent_it != torrents_list.end()) {
torrent_it->second.tokened_users.insert(user_id);
torrent_it->second.tokened_users.insert(userid);
} else {
std::cout << "Failed to find torrent to add a token for user " << user_id << std::endl;
std::cout << "Failed to find torrent to add a token for user " << userid << std::endl;
}
} else if (params["action"] == "remove_token") {
std::string info_hash = hex_decode(params["info_hash"]);
int user_id = atoi(params["userid"].c_str());
int userid = atoi(params["userid"].c_str());
auto torrent_it = torrents_list.find(info_hash);
if (torrent_it != torrents_list.end()) {
torrent_it->second.tokened_users.erase(user_id);
torrent_it->second.tokened_users.erase(userid);
} else {
std::cout << "Failed to find torrent " << info_hash << " to remove token for user " << user_id << std::endl;
std::cout << "Failed to find torrent " << info_hash << " to remove token for user " << userid << std::endl;
}
} else if (params["action"] == "delete_torrent") {
std::string info_hash = params["info_hash"];
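
The add_token/remove_token branches in the hunk above only touch the torrent's tokened_users container. A small sketch of that bookkeeping with stand-in types (assuming tokened_users is a std::set<int> of user ids, which is an assumption, not something this diff shows):

#include <iostream>
#include <set>
#include <string>
#include <unordered_map>

// Stand-in torrent holding only the token set.
struct torrent { std::set<int> tokened_users; };
typedef std::unordered_map<std::string, torrent> torrent_list;

int main() {
	torrent_list torrents;
	std::string info_hash(20, '\x11'); // Raw 20-byte info_hash, hex-decoded upstream
	torrents[info_hash] = torrent();
	int userid = 1234;
	auto torrent_it = torrents.find(info_hash);
	if (torrent_it != torrents.end()) {
		torrent_it->second.tokened_users.insert(userid); // add_token
		torrent_it->second.tokened_users.erase(userid);  // remove_token
	} else {
		std::cout << "Failed to find torrent to add a token for user " << userid << std::endl;
	}
}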
@@ -787,39 +814,56 @@ std::string worker::update(std::map<std::string, std::string> &params) {
auto torrent_it = torrents_list.find(info_hash);
if (torrent_it != torrents_list.end()) {
std::cout << "Deleting torrent " << torrent_it->second.id << " for the reason '" << get_del_reason(reason) << "'" << std::endl;
boost::mutex::scoped_lock lock(del_reasons_lock);
std::unique_lock<std::mutex> stats_lock(stats.mutex);
stats.leechers -= torrent_it->second.leechers.size();
stats.seeders -= torrent_it->second.seeders.size();
stats_lock.unlock();
std::unique_lock<std::mutex> us_lock(ustats_lock);
for (auto p = torrent_it->second.leechers.begin(); p != torrent_it->second.leechers.end(); p++) {
p->second.user->decr_leeching();
}
for (auto p = torrent_it->second.seeders.begin(); p != torrent_it->second.seeders.end(); p++) {
p->second.user->decr_seeding();
}
us_lock.unlock();
std::unique_lock<std::mutex> dr_lock(del_reasons_lock);
del_message msg;
msg.reason = reason;
msg.time = time(NULL);
del_reasons[info_hash] = msg;
torrents_list.erase(torrent_it);
} else {
std::cout << "Failed to find torrent " << info_hash << " to delete " << std::endl;
std::cout << "Failed to find torrent " << bintohex(info_hash) << " to delete " << std::endl;
}
} else if (params["action"] == "add_user") {
std::string passkey = params["passkey"];
unsigned int id = strtolong(params["id"]);
user u;
u.id = id;
u.can_leech = 1;
if (params["visible"] == "0") {
u.protect_ip = 1;
unsigned int userid = strtolong(params["id"]);
auto u = users_list.find(passkey);
if (u == users_list.end()) {
bool protect_ip = params["visible"] == "0";
user_ptr u(new user(userid, true, protect_ip));
users_list.insert(std::pair<std::string, user_ptr>(passkey, u));
std::cout << "Added user " << passkey << " with id " << userid << std::endl;
} else {
u.protect_ip = 0;
std::cout << "Tried to add already known user " << passkey << " with id " << userid << std::endl;
}
users_list[passkey] = u;
std::cout << "Added user " << id << std::endl;
} else if (params["action"] == "remove_user") {
std::string passkey = params["passkey"];
users_list.erase(passkey);
std::cout << "Removed user " << passkey << std::endl;
auto u = users_list.find(passkey);
if (u != users_list.end()) {
std::cout << "Removed user " << passkey << " with id " << u->second->get_id() << std::endl;
users_list.erase(u);
}
} else if (params["action"] == "remove_users") {
// Each passkey is exactly 32 characters long.
std::string passkeys = params["passkeys"];
for (unsigned int pos = 0; pos < passkeys.length(); pos += 32) {
std::string passkey = passkeys.substr(pos, 32);
users_list.erase(passkey);
std::cout << "Removed user " << passkey << std::endl;
auto u = users_list.find(passkey);
if (u != users_list.end()) {
std::cout << "Removed user " << passkey << std::endl;
users_list.erase(passkey);
}
}
} else if (params["action"] == "update_user") {
std::string passkey = params["passkey"];
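
The add_user branch in the hunk above now stores reference-counted user objects (user_ptr) rather than copying a plain user struct into the map. A sketch of that shape, assuming user_ptr is a std::shared_ptr<user>; the user class below is illustrative, the real one lives in the new user.h added in 0.7:

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Illustrative stand-ins for Ocelot's user/user_ptr/user_list types.
class user {
	public:
		user(unsigned int id, bool leech, bool protect) : id(id), can_leech(leech), protect_ip(protect) {}
		unsigned int get_id() const { return id; }
	private:
		unsigned int id;
		bool can_leech;
		bool protect_ip;
};
typedef std::shared_ptr<user> user_ptr;
typedef std::unordered_map<std::string, user_ptr> user_list;

int main() {
	user_list users;
	std::string passkey = "0123456789abcdef0123456789abcdef";
	if (users.find(passkey) == users.end()) {
		user_ptr u(new user(7, true, false)); // id, can_leech, protect_ip
		users.insert(std::pair<std::string, user_ptr>(passkey, u));
		std::cout << "Added user " << passkey << " with id " << u->get_id() << std::endl;
	} else {
		std::cout << "Tried to add already known user " << passkey << std::endl;
	}
}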
@@ -835,8 +879,8 @@ std::string worker::update(std::map<std::string, std::string> &params) {
if (i == users_list.end()) {
std::cout << "No user with passkey " << passkey << " found when attempting to change leeching status!" << std::endl;
} else {
users_list[passkey].protect_ip = protect_ip;
users_list[passkey].can_leech = can_leech;
i->second->set_protected(protect_ip);
i->second->set_leechstatus(can_leech);
std::cout << "Updated user " << passkey << std::endl;
}
} else if (params["action"] == "add_whitelist") {
@@ -879,50 +923,83 @@ std::string worker::update(std::map<std::string, std::string> &params) {
std::cout << "Failed to find torrent " << info_hash_hex << std::endl;
}
}
return "success";
return response("success", false, false);
}
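
Several of the update() branches above (delete_torrent in particular) now take short-lived std::unique_lock guards, one mutex per shared structure, releasing each before touching the next. A small sketch of that pattern; the mutex names and counters here are illustrative, not Ocelot's members:

#include <mutex>

// One mutex per shared structure, locked only around the statements that touch it.
std::mutex stats_mutex, ustats_mutex, del_reasons_mutex;
int leechers = 0, seeders = 0;

void on_torrent_deleted(int torrent_leechers, int torrent_seeders) {
	std::unique_lock<std::mutex> stats_lock(stats_mutex);
	leechers -= torrent_leechers;
	seeders -= torrent_seeders;
	stats_lock.unlock(); // Release early so the next section does not hold this lock

	std::unique_lock<std::mutex> us_lock(ustats_mutex);
	// ... per-user seeding/leeching counters would be decremented here ...
	us_lock.unlock();

	std::unique_lock<std::mutex> dr_lock(del_reasons_mutex);
	// ... the deletion reason would be recorded here ...
} // dr_lock is released automatically when it goes out of scope

int main() {
	on_torrent_deleted(3, 5);
}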

peer_list::iterator worker::add_peer(peer_list &peer_list, std::string &peer_id) {
peer new_peer;
std::pair<peer_list::iterator, bool> insert
= peer_list.insert(std::pair<std::string, peer>(peer_id, new_peer));
return insert.first;
}
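
worker::add_peer relies on the fact that a map's insert() reports whether the key was new and returns an iterator either way, so a peer is found or created in a single lookup. A standalone sketch with stand-in types (the real peer struct carries far more state than shown here):

#include <iostream>
#include <map>
#include <string>

// Minimal stand-in peer keyed by the client's peer_id.
struct peer { long long left = 0; };
typedef std::map<std::string, peer> peer_list;

// insert() leaves an existing entry untouched and returns its iterator;
// on a miss it inserts the default peer and returns the new iterator.
peer_list::iterator add_peer(peer_list &peers, const std::string &peer_id) {
	peer new_peer;
	std::pair<peer_list::iterator, bool> insert
		= peers.insert(std::pair<std::string, peer>(peer_id, new_peer));
	return insert.first; // Points at the peer either way
}

int main() {
	peer_list peers;
	add_peer(peers, "-UT3530-abcdefghijkl");
	add_peer(peers, "-UT3530-abcdefghijkl"); // Second call finds the same entry
	std::cout << peers.size() << " peer(s)" << std::endl;
}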

void worker::start_reaper() {
std::thread thread(&worker::do_start_reaper, this);
thread.detach();
}

void worker::do_start_reaper() {
reap_peers();
reap_del_reasons();
}
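
start_reaper() now launches both reaper routines on a single detached std::thread instead of separate boost::threads, matching the 0.7 changelog entry. A minimal sketch of that start/detach shape (illustrative class, not Ocelot's worker):

#include <chrono>
#include <iostream>
#include <thread>

class reaper {
	public:
		void start() {
			std::thread thread(&reaper::run, this);
			thread.detach(); // Fire-and-forget; the caller keeps serving requests
		}
	private:
		void run() {
			std::cout << "Starting peer reaper" << std::endl;
			// ... reap_peers() and reap_del_reasons() would run here, in order ...
		}
};

int main() {
	reaper r;
	r.start();
	std::this_thread::sleep_for(std::chrono::milliseconds(100)); // Give the detached thread time to finish
}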

void worker::reap_peers() {
boost::thread thread(&worker::do_reap_peers, this);
boost::thread t(&worker::do_reap_del_reasons, this);
}

void worker::do_reap_peers() {
std::cout << "Starting peer reaper" << std::endl;
cur_time = time(NULL);
unsigned int reaped = 0;
torrent_list::iterator i = torrents_list.begin();
for (; i != torrents_list.end(); i++) {
peer_list::iterator p = i->second.leechers.begin();
unsigned int reaped_l = 0, reaped_s = 0;
unsigned int cleared_torrents = 0;
for (auto t = torrents_list.begin(); t != torrents_list.end(); t++) {
bool reaped_this = false; // True if at least one peer was deleted from the current torrent
auto p = t->second.leechers.begin();
peer_list::iterator del_p;
while(p != i->second.leechers.end()) {
while (p != t->second.leechers.end()) {
if (p->second.last_announced + conf->peers_timeout < cur_time) {
del_p = p;
p++;
boost::mutex::scoped_lock lock(db->torrent_list_mutex);
i->second.leechers.erase(del_p);
reaped++;
std::unique_lock<std::mutex> us_lock(ustats_lock);
del_p->second.user->decr_leeching();
us_lock.unlock();
std::unique_lock<std::mutex> tl_lock(db->torrent_list_mutex);
t->second.leechers.erase(del_p);
reaped_this = true;
reaped_l++;
} else {
p++;
}
}
p = i->second.seeders.begin();
while(p != i->second.seeders.end()) {
p = t->second.seeders.begin();
while (p != t->second.seeders.end()) {
if (p->second.last_announced + conf->peers_timeout < cur_time) {
del_p = p;
p++;
boost::mutex::scoped_lock lock(db->torrent_list_mutex);
i->second.seeders.erase(del_p);
reaped++;
std::unique_lock<std::mutex> us_lock(ustats_lock);
del_p->second.user->decr_seeding();
us_lock.unlock();
std::unique_lock<std::mutex> tl_lock(db->torrent_list_mutex);
t->second.seeders.erase(del_p);
reaped_this = true;
reaped_s++;
} else {
p++;
}
}
if (reaped_this && t->second.seeders.empty() && t->second.leechers.empty()) {
std::stringstream record;
record << '(' << t->second.id << ",0,0,0," << t->second.balance << ')';
std::string record_str = record.str();
db->record_torrent(record_str);
cleared_torrents++;
}
}
std::cout << "Reaped " << reaped << " peers" << std::endl;
if (reaped_l || reaped_s) {
std::unique_lock<std::mutex> lock(stats.mutex);
stats.leechers -= reaped_l;
stats.seeders -= reaped_s;
}
std::cout << "Reaped " << reaped_l << " leechers and " << reaped_s << " seeders. Reset " << cleared_torrents << " torrents" << std::endl;
}
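
The peer reaper above uses the classic save-advance-erase idiom so that erasing a timed-out peer does not invalidate the loop iterator. A compact standalone version of that loop (stand-in peer type; timeout and clock handling simplified):

#include <ctime>
#include <iostream>
#include <map>
#include <string>

struct peer { time_t last_announced; };
typedef std::map<std::string, peer> peer_list;

// Copy the iterator, step past it, then erase the copy: the live iterator p
// never points at the element being removed, so the loop stays valid.
unsigned int reap(peer_list &peers, time_t timeout, time_t now) {
	unsigned int reaped = 0;
	auto p = peers.begin();
	peer_list::iterator del_p;
	while (p != peers.end()) {
		if (p->second.last_announced + timeout < now) {
			del_p = p;
			++p;
			peers.erase(del_p);
			reaped++;
		} else {
			++p;
		}
	}
	return reaped;
}

int main() {
	peer_list peers;
	peers["stale"] = peer{0};
	peers["fresh"] = peer{time(NULL)};
	std::cout << "Reaped " << reap(peers, 1800, time(NULL)) << " peers" << std::endl;
}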

void worker::do_reap_del_reasons()
void worker::reap_del_reasons()
{
std::cout << "Starting del reason reaper" << std::endl;
time_t max_time = time(NULL) - conf->del_reason_lifetime;
@@ -932,7 +1009,7 @@ void worker::do_reap_del_reasons()
if (it->second.time <= max_time) {
auto del_it = it;
it++;
boost::mutex::scoped_lock lock(del_reasons_lock);
std::unique_lock<std::mutex> dr_lock(del_reasons_lock);
del_reasons.erase(del_it);
reaped++;
continue;
@@ -1022,6 +1099,6 @@ std::string worker::get_del_reason(int code)

/* Peers should be invisible if they are a leecher without
download privs or their IP is invalid */
bool worker::peer_is_visible(user *u, peer *p) {
return (p->left == 0 || u->can_leech) && !p->invalid_ip;
bool worker::peer_is_visible(user_ptr &u, peer *p) {
return (p->left == 0 || u->can_leech()) && !p->invalid_ip;
}
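
peer_is_visible() now takes the shared user handle: a seeder (left == 0) is always visible, a leecher only if the user may leech, and an invalid IP hides the peer either way. A tiny sketch of the same predicate with stand-in types (user_ptr assumed to be a std::shared_ptr<user>; the real classes live in ocelot.h and user.h):

#include <iostream>
#include <memory>

class user {
	public:
		explicit user(bool leech) : leech(leech) {}
		bool can_leech() const { return leech; }
	private:
		bool leech;
};
typedef std::shared_ptr<user> user_ptr;
struct peer { long long left; bool invalid_ip; };

static bool peer_is_visible(user_ptr &u, peer *p) {
	// Seeders are always shown; leechers only when allowed to leech,
	// and never when the announced IP failed validation.
	return (p->left == 0 || u->can_leech()) && !p->invalid_ip;
}

int main() {
	user_ptr u(new user(false));
	peer p = {1024, false}; // A leecher with data left and a valid IP
	std::cout << std::boolalpha << peer_is_visible(u, &p) << std::endl; // false: user may not leech
}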

26
worker.h
@@ -6,7 +6,7 @@
#include <arpa/inet.h>
#include <iostream>
#include <fstream>
#include <boost/thread/mutex.hpp>
#include <mutex>
#include "site_comm.h"
#include "ocelot.h"

@@ -20,27 +20,29 @@ class worker {
std::unordered_map<std::string, del_message> del_reasons;
config * conf;
mysql * db;
void do_reap_peers();
void do_reap_del_reasons();
tracker_status status;
time_t cur_time;
site_comm * s_comm;

std::mutex del_reasons_lock;
std::mutex ustats_lock;
void do_start_reaper();
void reap_peers();
void reap_del_reasons();
std::string get_del_reason(int code);
boost::mutex del_reasons_lock;
bool peer_is_visible(user *u, peer *p);
peer_list::iterator add_peer(peer_list &peer_list, std::string &peer_id);
bool peer_is_visible(user_ptr &u, peer *p);

public:
worker(torrent_list &torrents, user_list &users, std::vector<std::string> &_whitelist, config * conf_obj, mysql * db_obj, site_comm * sc);
std::string work(std::string &input, std::string &ip, bool &gzip);
std::string error(std::string err);
std::string warning(std::string err);
std::string announce(torrent &tor, user &u, std::map<std::string, std::string> &params, std::map<std::string, std::string> &headers, std::string &ip, bool &gzip);
std::string scrape(const std::list<std::string> &infohashes, std::map<std::string, std::string> &headers, bool &gzip);
std::string update(std::map<std::string, std::string> &params);
std::string work(std::string &input, std::string &ip);
std::string announce(torrent &tor, user_ptr &u, params_type &params, params_type &headers, std::string &ip);
std::string scrape(const std::list<std::string> &infohashes, params_type &headers);
std::string update(params_type &params);

bool signal(int sig);

tracker_status get_status() { return status; }

void reap_peers();
void start_reaper();
};
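
The worker.h diff above swaps every std::map<std::string, std::string> parameter for params_type. The typedef itself is not shown in this commit view; assuming it is a plain alias (presumably defined in ocelot.h), the shape would be roughly:

#include <map>
#include <string>

// Assumed shape only: the old signatures used std::map<std::string, std::string>,
// so params_type is presumably an alias for it; this is not confirmed by the diff.
typedef std::map<std::string, std::string> params_type;

int main() {
	params_type params;
	params["action"] = "add_user";
	return params.count("action") == 1 ? 0 : 1;
}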