Bug#853808: Acknowledgement (unblock: ora2pg/18.0-1)



I forgot to add the debdiff

PS: the package is in the NEW queue at the moment
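
For context, a debdiff like the one attached below is produced by comparing
the two source packages with debdiff from devscripts. A minimal sketch, using
illustrative .dsc file names rather than the exact ones from this upload:

    # compare old and new source packages, writing the unified diff to a file
    debdiff ora2pg_17.6-1.dsc ora2pg_18.0-1.dsc > ora2pg_18.0-1.debdiff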

-- 
1AE0 322E B8F7 4717 BDEA BF1D 44BB 1BA7 9F6C 6333

keybase: https://keybase.io/gfa
diff -Nru ora2pg-18.0/Makefile.PL ora2pg-17.6/Makefile.PL
--- ora2pg-18.0/Makefile.PL	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/Makefile.PL	2016-11-18 05:45:49.000000000 +0800
@@ -260,99 +260,12 @@
 # define multiple REPLACE_QUERY lines.
 #REPLACE_QUERY	EMPLOYEES[SELECT e.id,e.fisrtname,lastname FROM EMPLOYEES e JOIN EMP_UPDT u ON (e.id=u.id AND u.cdate>'2014-08-01 00:00:00')]
 
-#------------------------------------------------------------------------------
-# FULL TEXT SEARCH SECTION (Control full text search export behaviors)
-#------------------------------------------------------------------------------
-
 # Force Ora2Pg to translate Oracle Text indexes into PostgreSQL indexes using
 # pg_trgm extension. Default is to translate CONTEXT indexes into FTS indexes
 # and CTXCAT indexes using pg_trgm. Most of the time using pg_trgm is enough,
 # this is why this directive stand for.
-#
 CONTEXT_AS_TRGM		0
 
-# By default Ora2Pg creates a function-based index to translate Oracle Text
-# indexes. 
-#    CREATE INDEX ON t_document
-#                 USING gin(to_tsvector('french', title));
-# You will have to rewrite the CONTAIN() clause using to_tsvector(), example:
-#    SELECT id,title FROM t_document
-#                    WHERE to_tsvector(title)) @@ to_tsquery('search_word');
-#
-# To force Ora2Pg to create an extra tsvector column with a dedicated triggers
-# for FTS indexes, disable this directive. In this case, Ora2Pg will add the
-# column as follow: ALTER TABLE t_document ADD COLUMN tsv_title tsvector;
-# Then update the column to compute FTS vectors if data have been loaded before
-#     UPDATE t_document SET tsv_title =
-#                       to_tsvector('french', coalesce(title,''));
-# To automatically update the column when a modification in the title column
-# appears, Ora2Pg adds the following trigger:
-#
-# CREATE FUNCTION tsv_t_document_title() RETURNS trigger AS $$
-# BEGIN
-#        IF TG_OP = 'INSERT' OR new.title != old.title THEN
-#                new.tsv_title :=
-#                to_tsvector('french', coalesce(new.title,''));
-#        END IF;
-#        return new;
-# END
-# $$ LANGUAGE plpgsql;
-# CREATE TRIGGER trig_tsv_t_document_title BEFORE INSERT OR UPDATE
-#  ON t_document
-#  FOR EACH ROW EXECUTE PROCEDURE tsv_t_document_title();
-#
-# When the Oracle text index is defined over multiple column, Ora2Pg will use
-# setweight() to set a weight in the order of the column declaration.
-#
-FTS_INDEX_ONLY	1
-
-# Use this directive to force text search configuration to use. When it is not
-# set, Ora2Pg will autodetect the stemmer used by Oracle for each index and
-# pg_catalog.english if nothing is found. 
-#
-#FTS_CONFIG	pg_catalog.french
-
-# If you want to perform your text search in an accent insensitive way, enable
-# this directive. Ora2Pg will create an helper function over unaccent() and
-# creates the pg_trgm indexes using this function. With FTS Ora2Pg will
-# redefine your text search configuration, for example:
-#
-#	CREATE TEXT SEARCH CONFIGURATION fr (COPY = pg_catalog.french); 
-#	ALTER TEXT SEARCH CONFIGURATION fr
-#		ALTER MAPPING FOR hword, hword_part, word WITH unaccent, french_stem;
-#
-# When enabled, Ora2pg will create the wrapper function:
-#
-#	CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-#	RETURNS text AS
-#	$$
-#		SELECT public.unaccent('public.unaccent', $1)
-#	$$  LANGUAGE sql IMMUTABLE
-#	    COST 1;
-#
-# indexes are exported as follow:
-#
-#	CREATE INDEX t_document_title_unaccent_trgm_idx ON t_document 
-#        	USING gin (unaccent_immutable(title) gin_trgm_ops);
-#
-# In your queries you will need to use the same function in the search to
-# be able to use the function-based index. Example:
-#
-#	SELECT * FROM t_document
-#		WHERE unaccent_immutable(title) LIKE '%donnees%';
-#
-USE_UNACCENT		0
-
-# Same as above but call lower() in the unaccent_immutable() function:
-#
-#      CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-#      RETURNS text AS
-#      $$
-#          SELECT lower(public.unaccent('public.unaccent', $1));
-#      $$ LANGUAGE sql IMMUTABLE;
-#
-USE_LOWER_UNACCENT	0
-
 #------------------------------------------------------------------------------
 # CONSTRAINT SECTION (Control constraints export and import behaviors)
 #------------------------------------------------------------------------------
@@ -441,6 +354,12 @@
 # Modify output from the following tables(fields separate by space or comma)
 #MODIFY_STRUCT	TABLE_TEST(dico,dossier)
 
+# Some time you need to force the destination type, for example a column
+# exported as timestamp by Ora2Pg can be forced into type date. Value is
+# a comma-separated list of TABLE:COLUMN:TYPE structure. If you need to use
+# comma or space inside type definition you will have to backslash them.
+#MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
+
 # You may wish to change table names during data extraction, especally for
 # replication use. Give a list of tables separate by space as follow.
 #REPLACE_TABLES	ORIG_TB_NAME1:NEW_TB_NAME1 ORIG_TB_NAME2:NEW_TB_NAME2 
@@ -595,13 +514,7 @@
 # If you're experiencing problems in data type export, the following directive
 # will help you to redefine data type translation used in Ora2pg. The syntax is
 # a comma separated list of "Oracle datatype:Postgresql data type". Here are the
-# data type that can be redefined and their default value. If you want to
-# replace a type with a precision and scale you need to escape the coma with
-# a backslash. For example, if you want to replace all NUMBER(*,0) into bigint
-# instead of numeric(38)add the following:
-# 	DATA_TYPE	NUMBER(*\,0):bigint
-# Here is the default replacement for all Oracle's types. You don't have to
-# recopy all type conversion but just the one you want to rewrite.
+# data type that can be redefined and their default value.
 #DATA_TYPE	DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
 
 # If set to 1 replace portable numeric type into PostgreSQL internal type.
@@ -646,12 +559,6 @@
 # to use a fake date.
 #REPLACE_ZERO_DATE	1970-01-01 00:00:00
 
-# Some time you need to force the destination type, for example a column
-# exported as timestamp by Ora2Pg can be forced into type date. Value is
-# a comma-separated list of TABLE:COLUMN:TYPE structure. If you need to use
-# comma or space inside type definition you will have to backslash them.
-#MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
-
 #------------------------------------------------------------------------------
 # GRANT SECTION (Control priviledge and owner export)
 #------------------------------------------------------------------------------
@@ -818,7 +725,7 @@
 
 
 #------------------------------------------------------------------------------
-# PLSQL SECTION (Control SQL and PL/SQL to PLPGSQL rewriting behaviors)
+# PLSQL SECTION (Control PL/SQL to PLPGSQL rewriting behaviors)
 #------------------------------------------------------------------------------
 
 # If the above configuration directive is not enough to validate your PL/SQL code
@@ -854,11 +761,6 @@
 # is to use a schema to emulate package.
 PACKAGE_AS_SCHEMA	1
 
-# Enable this directive if the rewrite of Oracle native syntax (+) of
-# OUTER JOIN is broken. This will force Ora2Pg to not rewrite such code,
-# default is to try to rewrite simple form of rigth outer join for the
-# moment.
-REWRITE_OUTER_JOIN	1
 
 #------------------------------------------------------------------------------
 # ASSESSMENT SECTION (Control migration assessment behaviors)
@@ -895,12 +797,6 @@
 # is given at input using the -i option or INPUT directive.
 #AUDIT_USER	USERNAME1,USERNAME2
 
-# By default Ora2Pg will convert call to SYS_GUID() Oracle function
-# with a call to uuid_generate_v4() from uuid-ossp extension. You can
-# redefined it to use the gen_random_uuid() function from pgcrypto
-# extension by changing the function name below.
-#UUID_FUNCTION	uuid_generate_v4
-
 #------------------------------------------------------------------------------
 # POSTGRESQL FEATURE SECTION (Control which PostgreSQL features are available)
 #------------------------------------------------------------------------------
@@ -1035,7 +931,7 @@
     'DESTDIR'      => $PREFIX,
     'INSTALLDIRS'  => $ENV{INSTALLDIRS},
     'clean'        => {FILES => "$DEST_CONF_FILE lib/blib/"},
-    'PREREQ_PM'    => {DBI => 0},
+    'PREREQ_PM'    => { DBI => 0, DBD::Oracle },
     'META_MERGE'   => {
 	resources  => {
 		homepage => 'http://ora2pg.darold.net/',
diff -Nru ora2pg-18.0/README ora2pg-17.6/README
--- ora2pg-18.0/README	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/README	2016-11-18 05:45:49.000000000 +0800
@@ -16,7 +16,7 @@
 FEATURES
     Ora2Pg consist of a Perl script (ora2pg) and a Perl module (Ora2Pg.pm),
     the only thing you have to modify is the configuration file ora2pg.conf
-    by setting the DSN to the Oracle database and optionally the name of a
+    by setting the DSN to the Oracle database and optionaly the name of a
     schema. Once that's done you just have to set the type of export you
     want: TABLE with constraints, VIEW, MVIEW, TABLESPACE, SEQUENCE,
     INDEXES, TRIGGER, GRANT, FUNCTION, PROCEDURE, PACKAGE, PARTITION, TYPE,
@@ -64,7 +64,7 @@
     PostgreSQL but there's still manual works to do. The Oracle specific
     PL/SQL code generated for functions, procedures, packages and triggers
     has to be reviewed to match the PostgreSQL syntax. You will find some
-    useful recommendations on porting Oracle PL/SQL code to PostgreSQL
+    useful recommandations on porting Oracle PL/SQL code to PostgreSQL
     PL/PGSQL at "Converting from other Databases to PostgreSQL", section:
     Oracle (http://wiki.postgresql.org/wiki/Main_Page).
 
@@ -88,17 +88,11 @@
     an already packaged DBD::Oracle easy to install.
 
   Requirement
-    You need a modern Perl distribution (perl 5.10 and more). To connect to
-    a database and proceed to his migration you need the DBI Perl module >
-    1.614. To migrate an Oracle database you need the DBD::Oracle Perl
-    modules to be installed. To migrate a MySQL database you need the
-    DBD::MySQL Perl modules. These modules are used to connect to the
-    database but they are not mandatory if you want to migrate DDL input
-    files.
-
-    To install DBD::Oracle and have it working you need to have the Oracle
-    client libraries installed and the ORACLE_HOME environment variable must
-    be defined.
+    You need a modern Perl distribution (perl 5.10 and more), the DBI >
+    1.614 and DBD::Oracle Perl modules to be installed. These are used to
+    connect to the Oracle database. To install DBD::Oracle and have it
+    working you need to have the Oracle client libraries installed and the
+    ORACLE_HOME environment variable must be defined.
 
     If you plan to export a MySQL database you need to install the Perl
     module DBD::mysql which require that the mysql client libraries are
@@ -114,7 +108,7 @@
     to a host with the psql client installed. If you prefer to load export
     'on the fly', the perl module DBD::Pg is required.
 
-    Ora2Pg allow to dump all output in a compressed gzip file, to do that
+    Ora2Pg allow to dump all output int a compressed gzip file, to do that
     you need the Compress::Zlib Perl module or if you prefer using bzip2
     compression, the program bzip2 must be available in your PATH.
 
@@ -122,8 +116,8 @@
     Like any other Perl Module Ora2Pg can be installed with the following
     commands:
 
-            tar xzf ora2pg-x.x.tar.gz
-            cd ora2pg-x.x/
+            tar xzf ora2pg-10.x.tar.gz
+            cd ora2pg-10.x/
             perl Makefile.PL
             make && make install
 
@@ -198,13 +192,13 @@
     are set at the time they are read in the configuration file.
 
     For configuration directives that just take a single value, you can use
-    them multiple time in the configuration file but only the last
-    occurrence found in the file will be used. For configuration directives
-    that allow a list of value, you can use it multiple time, the values
-    will be appended to the list. If you use the IMPORT directive to load a
-    custom configuration file, directives defined in this file will be
-    stores from the place the IMPORT directive is found, so it is better to
-    put it at the end of the configuration file.
+    them multiple time in the configuration file but only the last occurence
+    found in the file will be used. For configuration directives that allow
+    a list of value, you can use it multiple time, the values will be
+    appended to the list. If you use the IMPORT directive to load a custom
+    configuration file, directives defined in this file will be stores from
+    the place the IMPORT directive is found, so it is better to put it at
+    the end of the configuration file.
 
     Values set in command line options will override values from the
     configuration file.
@@ -235,7 +229,6 @@
         -c | --conf file  : Used to set an alternate configuration file than the
                             default /etc/ora2pg/ora2pg.conf.
         -d | --debug      : Enable verbose output.
-        -D | --data_type STR : Allow custom type replacement at command line.
         -e | --exclude str: coma separated list of objects to exclude from export.
                             Can be used with SHOW_COLUMN too.
         -h | --help       : Print this short help.
@@ -304,7 +297,7 @@
     happen." Most of the time this is an OOM issue, you might first reduce
     DATA_LIMIT value.
 
-    For developers, it is possible to add your own custom option(s) in the
+    For developpers, it is possible to add your own custom option(s) in the
     Perl script ora2pg as any configuration directive from ora2pg.conf can
     be passed in lower case to the new Ora2Pg object instance. See ora2pg
     code on how to add your own option.
@@ -430,7 +423,7 @@
         a file as argument. Set this directive to a file containing PL/SQL
         Oracle Code like function, procedure or full package body to prevent
         Ora2Pg from connecting to an Oracle database and just apply his
-        conversion tool to the content of the file. This can be used with
+        convertion tool to the content of the file. This can be used with
         the most of export types: TABLE, TRIGGER, PROCEDURE, VIEW, FUNCTION
         or PACKAGE, etc.
 
@@ -480,7 +473,7 @@
     The perl script says nothing and the output file is empty: the user has
     not enough right to extract something from the database. Try to connect
     Oracle as super user or take a look at directive USER_GRANTS above and
-    at next section, especially the SCHEMA directive.
+    at next section, especiallly the SCHEMA directive.
 
     LOGFILE
         By default all message are sent to the standard output. If you give
@@ -651,7 +644,7 @@
         Note that you can chained multiple export by giving to the TYPE
         directive a comma-separated list of export type.
 
-        Ora2Pg will convert Oracle partition using table inheritance,
+        Ora2Pg will convert Oracle partition using table inheritence,
         trigger and functions. See document at Pg site:
         http://www.postgresql.org/docs/current/interactive/ddl-partitioning.
         html
@@ -716,7 +709,7 @@
         See http://pgxn.org/dist/oracle_fdw/ for more information on this
         foreign data wrapper.
 
-        Release 10 adds a new export type destined to evaluate the content
+        Release 10 adds a new export type destinated to evaluate the content
         of the database to migrate, in terms of objects and cost to end the
         migration:
 
@@ -730,8 +723,8 @@
     ESTIMATE_COST
         Activate the migration cost evaluation. Must only be used with
         SHOW_REPORT, FUNCTION, PROCEDURE, PACKAGE and QUERY export type.
-        Default is disabled. You may want to use the --estimate_cost command
-        line option instead to activate this functionality. Note that
+        Default is disabled. You may wat to use the --estimate_cost command
+        line option instead to activate this functionnality. Note that
         enabling this directive will force PLSQL_PGSQL activation.
 
     COST_UNIT_VALUE
@@ -853,7 +846,7 @@
                 ALLOW           EMPLOYEES SALE_.* COUNTRIES .*_GEOM_SEQ
 
         will export objects with name EMPLOYEES, COUNTRIES, all objects
-        beginning with 'SALE_' and all objects with a name ending by
+        begining with 'SALE_' and all objects with a name ending by
         '_GEOM_SEQ'. The object depends of the export type. Note that regex
         will not works with 8i database, you must use the % placeholder
         instead, Ora2Pg will use the LIKE operator.
@@ -876,15 +869,15 @@
                         -e 'INDEX[emp_.*];CKEY[emp_salary_min]'
 
         This command will export the definition of the employee table but
-        will exclude all index beginning with 'emp_' and the CHECK
-        constraint called 'emp_salary_min'.
+        will exclude all index begining with 'emp_' and the CHECK contraint
+        called 'emp_salary_min'.
 
         When exporting partition you can exclude some partition tables by
         using
 
                 ora2pg -p -c ora2pg.conf -t PARTITION -e 'PARTITION[PART_199.* PART_198.*]'
 
-        This will exclude partitioned tables for year 1980 to 1999 from the
+        This will exclude partitionned tables for year 1980 to 1999 from the
         export but not the main partition table. The trigger will also be
         adapted to exclude those table.
 
@@ -914,7 +907,7 @@
                 EXCLUDE         EMPLOYEES TMP_.* COUNTRIES
 
         will exclude object with name EMPLOYEES, COUNTRIES and all tables
-        beginning with 'tmp_'.
+        begining with 'tmp_'.
 
         For example, you can ban from export some unwanted function with
         this directive:
@@ -922,7 +915,7 @@
                 EXCLUDE         write_to_.* send_mail_.*
 
         this example will exclude all functions, procedures or functions in
-        a package with the name beginning with those regex. Note that regex
+        a package with the name begining with those regex. Note that regex
         will not works with 8i database, you must use the % placeholder
         instead, Ora2Pg will use the NOT LIKE operator.
 
@@ -989,116 +982,20 @@
     REPLACE_QUERY
         Sometime you may want to extract data from an Oracle table but you
         need a a custom query for that. Not just a "SELECT * FROM table"
-        like Ora2Pg do but a more complex query. This directive allow you to
-        overwrite the query used by Ora2Pg to extract data. The format is
+        like Ora2Pg do but a more complexe query. This directive allow you
+        to overwrite the query used by Ora2Pg to extract data. The format is
         TABLENAME[SQL_QUERY]. If you have multiple table to extract by
         replacing the Ora2Pg query, you can define multiple REPLACE_QUERY
         lines.
 
                 REPLACE_QUERY   EMPLOYEES[SELECT e.id,e.fisrtname,lastname FROM EMPLOYEES e JOIN EMP_UPDT u ON (e.id=u.id AND u.cdate>'2014-08-01 00:00:00')]
 
-  Controm of Full Text Search export
-    Several directives can be used to control the way Ora2Pg will export the
-    Oracle's Text search indexes. By default CONTEXT indexes will be
-    exported to PostgreSQL FTS indexes but CTXCAT indexes wikk be exported
-    as indexes using the pg_trgm extension.
-
     CONTEXT_AS_TRGM
         Force Ora2Pg to translate Oracle Text indexes into PostgreSQL
         indexes using pg_trgm extension. Default is to translate CONTEXT
         indexes into FTS indexes and CTXCAT indexes using pg_trgm. Most of
         the time using pg_trgm is enough, this is why this directive stand
-        for. You need to create the pg_trgm extension into the destination
-        database before importing the objects:
-
-                CREATE EXTENSION pg_trgm;
-
-    FTS_INDEX_ONLY
-        By default Ora2Pg creates a function-based index to translate Oracle
-        Text indexes.
-
-                CREATE INDEX ON t_document
-                        USING gin(to_tsvector('pg_catalog.french', title));
-
-        You will have to rewrite the CONTAIN() clause using to_tsvector(),
-        example:
-
-                SELECT id,title FROM t_document
-                        WHERE to_tsvector(title)) @@ to_tsquery('search_word');
-
-        To force Ora2Pg to create an extra tsvector column with a dedicated
-        triggers for FTS indexes, disable this directive. In this case,
-        Ora2Pg will add the column as follow: ALTER TABLE t_document ADD
-        COLUMN tsv_title tsvector; Then update the column to compute FTS
-        vectors if data have been loaded before UPDATE t_document SET
-        tsv_title = to_tsvector('pg_catalog.french', coalesce(title,'')); To
-        automatically update the column when a modification in the title
-        column appears, Ora2Pg adds the following trigger:
-
-                CREATE FUNCTION tsv_t_document_title() RETURNS trigger AS $$
-                BEGIN
-                       IF TG_OP = 'INSERT' OR new.title != old.title THEN
-                               new.tsv_title :=
-                               to_tsvector('pg_catalog.french', coalesce(new.title,''));
-                       END IF;
-                       return new;
-                END
-                $$ LANGUAGE plpgsql;
-                CREATE TRIGGER trig_tsv_t_document_title BEFORE INSERT OR UPDATE
-                 ON t_document
-                 FOR EACH ROW EXECUTE PROCEDURE tsv_t_document_title();
-
-        When the Oracle text index is defined over multiple column, Ora2Pg
-        will use setweight() to set a weight in the order of the column
-        declaration.
-
-    FTS_CONFIG
-        Use this directive to force text search configuration to use. When
-        it is not set, Ora2Pg will autodetect the stemmer used by Oracle for
-        each index and pg_catalog.english if the information is not found.
-
-    USE_UNACCENT
-        If you want to perform your text search in an accent insensitive
-        way, enable this directive. Ora2Pg will create an helper function
-        over unaccent() and creates the pg_trgm indexes using this function.
-        With FTS Ora2Pg will redefine your text search configuration, for
-        example:
-
-              CREATE TEXT SEARCH CONFIGURATION fr (COPY = french); 
-              ALTER TEXT SEARCH CONFIGURATION fr
-                      ALTER MAPPING FOR hword, hword_part, word WITH unaccent, french_stem;
-
-        then set the FTS_CONFIG ora2pg.conf directive to fr instead of
-        pg_catalog.english.
-
-        When enabled, Ora2pg will create the wrapper function:
-
-              CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-              RETURNS text AS
-              $$
-                  SELECT public.unaccent('public.unaccent', $1);
-              $$ LANGUAGE sql IMMUTABLE
-                 COST 1;
-
-        the indexes are exported as follow:
-
-              CREATE INDEX t_document_title_unaccent_trgm_idx ON t_document 
-                  USING gin (unaccent_immutable(title) gin_trgm_ops);
-
-        In your queries you will need to use the same function in the search
-        to be able to use the function-based index. Example:
-
-                SELECT * FROM t_document
-                        WHERE unaccent_immutable(title) LIKE '%donnees%';
-
-    USE_LOWER_UNACCENT
-        Same as above but call lower() in the unaccent_immutable() function:
-
-              CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-              RETURNS text AS
-              $$
-                  SELECT lower(public.unaccent('public.unaccent', $1));
-              $$ LANGUAGE sql IMMUTABLE;
+        for.
 
   Modifying object structure
     One of the great usage of Ora2Pg is its flexibility to replicate Oracle
@@ -1129,6 +1026,18 @@
         and columns 'id' and 'fichier' from the T_TEST2 table. This
         directive is only used with COPY or INSERT export.
 
+    MODIFY_TYPE
+        Some time you need to force the destination type, for example a
+        column exported as timestamp by Ora2Pg can be forced into type date.
+        Value is a comma-separated list of TABLE:COLUMN:TYPE structure. If
+        you need to use comma or space inside type definition you will have
+        to backslach them.
+
+                MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
+
+        Type of table1.col3 will be replaced by a varchar and table1.col4 by
+        a decimal with precision.
+
     REPLACE_TABLES
         This directive allow you to remap a list of Oracle table name to a
         PostgreSQL table name during export. The value is a list of
@@ -1141,7 +1050,7 @@
 
     REPLACE_COLS
         Like table name, the name of the column can be remapped to a
-        different name using the following syntax:
+        different name using the following syntaxe:
 
                 REPLACE_COLS    ORIG_TBNAME(ORIG_COLNAME1:NEW_COLNAME1,ORIG_COLNAME2:NEW_COLNAME2)
 
@@ -1214,7 +1123,7 @@
         value 1, this will force Ora2Pg to export all indexes defined on
         varchar2() and char() columns using those operators. If you set it
         to a value greater than 1 it will only change indexes on columns
-        where the character limit is greater or equal than this value. For
+        where the charactere limit is greater or equal than this value. For
         example, set it to 128 to create these kind of indexes on columns of
         type varchar2(N) where N >= 128.
 
@@ -1222,14 +1131,14 @@
         Enable this directive if you want that your partition table name
         will be exported using the parent table name. Disabled by default.
         If you have multiple partitioned table, when exported to PostgreSQL
-        some partitions could have the same name but different parent
-        tables. This is not allowed, table name must be unique.
+        some partitions could have the same name but dfferent parent tables.
+        This is not allowed, table name must be unique.
 
     DISABLE_PARTITION
         If you don't want to reproduce the partitioning like in Oracle and
-        want to export all partitioned Oracle data into the main single
+        want to export all partitionned Oracle data into the main single
         table in PostgreSQL enable this directive. Ora2Pg will export all
-        data into the main table name. Default is to use partitioning,
+        data into the main table name. Default is to use partitionning,
         Ora2Pg will export data from each partition and import them into the
         PostgreSQL dedicated partition table.
 
@@ -1271,7 +1180,7 @@
         with a two or three dimensional polygon.
 
     CONVERT_SRID
-        This directive allow you to control the automatically conversion of
+        This directive allow you to control the automatically convertion of
         Oracle SRID to standard EPSG. If enabled, Ora2Pg will use the Oracle
         function sdo_cs.map_oracle_srid_to_epsg() to convert all SRID.
         Enabled by default.
@@ -1299,7 +1208,7 @@
         INTERNAL. When it is set to WKT, Ora2Pg will use
         SDO_UTIL.TO_WKTGEOMETRY() to extract the geometry data. When it is
         set to WKB, Ora2Pg will use the binary output using
-        SDO_UTIL.TO_WKBGEOMETRY(). If those two extract type are calls at
+        SDO_UTIL.TO_WKBGEOMETRY(). If those two extract type are calles at
         Oracle side, they are slow and you can easily reach Out Of Memory
         when you have lot of rows. Also WKB is not able to export 3D
         geometry and some geometries like CURVEPOLYGON. In this case you may
@@ -1335,8 +1244,8 @@
         When Ora2Pg detect a table with some BLOB it will automatically
         reduce the value of this directive by dividing it by 10 until his
         value is below 1000. You can control this value by setting
-        BLOB_LIMIT. Exporting BLOB use lot of resources, setting it to a too
-        high value can produce OOM.
+        BLOB_LIMIT. Exporting BLOB use lot of ressources, setting it to a
+        too high value can produce OOM.
 
     OUTPUT
         The Ora2Pg output filename can be changed with this directive.
@@ -1347,8 +1256,8 @@
         installed for the .bz2 extension.
 
     OUTPUT_DIR
-        Since release 7.0, you can define a base directory where the file
-        will be written. The directory must exists.
+        Since release 7.0, you can define a base directory where wfile will
+        be written. The directory must exists.
 
     BZIP2
         This directive allow you to specify the full path to the bzip2
@@ -1437,7 +1346,7 @@
     STOP_ON_ERROR
         Set this parameter to 0 to not include the call to \set
         ON_ERROR_STOP ON in all SQL scripts generated by Ora2Pg. By default
-        this order is always present so that the script will immediately
+        this order is always present so that the script will immediatly
         abort when an error is encountered.
 
     COPY_FREEZE
@@ -1477,127 +1386,9 @@
         in this set this directive to 1, ora2pg will not try to change the
         setting.
 
-  Column tytpe control
-    PG_NUMERIC_TYPE
-        If set to 1 replace portable numeric type into PostgreSQL internal
-        type. Oracle data type NUMBER(p,s) is approximatively converted to
-        real and float PostgreSQL data type. If you have monetary fields or
-        don't want rounding issues with the extra decimals you should
-        preserve the same numeric(p,s) PostgreSQL data type. Do that only if
-        you need very good precision because using numeric(p,s) is slower
-        than using real or double.
-
-    PG_INTEGER_TYPE
-        If set to 1 replace portable numeric type into PostgreSQL internal
-        type. Oracle data type NUMBER(p) or NUMBER are converted to
-        smallint, integer or bigint PostgreSQL data type following the
-        length of the precision. If NUMBER without precision are set to
-        DEFAULT_NUMERIC (see bellow).
-
-    DEFAULT_NUMERIC
-        NUMBER without precision are converted by default to bigint only if
-        PG_INTEGER_TYPE is true. You can overwrite this value to any PG
-        type, like integer or float.
-
-    DATA_TYPE
-        If you're experiencing any problem in data type schema conversion
-        with this directive you can take full control of the correspondence
-        between Oracle and PostgreSQL types to redefine data type
-        translation used in Ora2pg. The syntax is a comma-separated list of
-        "Oracle datatype:Postgresql datatype". Here are the default list
-        used:
-
-                DATA_TYPE       DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
-
-        Note that the directive and the list definition must be a single
-        line.
-
-        If you want to replace a type with a precision and scale you need to
-        escape the coma with a backslash. For example, if you want to
-        replace all NUMBER(*,0) into bigint instead of numeric(38) add the
-        following:
-
-               DATA_TYPE       NUMBER(*\,0):bigint
-
-        You don't have to recopy all default type conversion but just the
-        one you want to rewrite.
-
-        There's a special case with BFILE when they are converted to type
-        TEXT, they will just contains the full path to the external file. If
-        you set the destination type to BYTEA, the default, Ora2Pg will
-        export the content of the BFILE as bytea. The third case is when you
-        set the destination type to EFILE, in this case, Ora2Pg will export
-        it as an EFILE record: (DIRECTORY, FILENAME). Use the DIRECTORY
-        export type to export the existing directories as well as privileges
-        on those directories.
-
-        There's no SQL function available to retrieve the path to the BFILE,
-        then Ora2Pg have to create one using the DBMS_LOB package.
-
-                CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
-                RETURN VARCHAR2
-                  AS
-                    l_dir   VARCHAR2(4000);
-                    l_fname VARCHAR2(4000);
-                    l_path  VARCHAR2(4000);
-                  BEGIN
-                    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-                    SELECT directory_path INTO l_path FROM all_directories
-                        WHERE directory_name = l_dir;
-                    l_dir := rtrim(l_path,'/');
-                    RETURN l_dir || '/' || l_fname;
-                  END;
-
-        This function is only created if Ora2Pg found a table with a BFILE
-        column and that the destination type is TEXT. The function is
-        dropped at the end of the export. This concern both, COPY and INSERT
-        export type.
-
-        There's no SQL function available to retrieve BFILE as an EFILE
-        record, then Ora2Pg have to create one using the DBMS_LOB package.
-
-                CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
-                RETURN VARCHAR2
-                  AS
-                    l_dir   VARCHAR2(4000);
-                    l_fname VARCHAR2(4000);
-                  BEGIN
-                    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-                    RETURN '(' || l_dir || ',' || l_fnamei || ')';
-                  END;
-
-        This function is only created if Ora2Pg found a table with a BFILE
-        column and that the destination type is EFILE. The function is
-        dropped at the end of the export. This concern both, COPY and INSERT
-        export type.
-
-        To set the destination type, use the DATA_TYPE configuration
-        directive:
-
-                DATA_TYPE       BFILE:EFILE
-
-        for example.
-
-        The EFILE type is a user defined type created by the PostgreSQL
-        extension external_file that can be found here:
-        https://github.com/darold/external_file This is a port of the BFILE
-        Oracle type to PostgreSQL.
-
-    MODIFY_TYPE
-        Some time you need to force the destination type, for example a
-        column exported as timestamp by Ora2Pg can be forced into type date.
-        Value is a comma-separated list of TABLE:COLUMN:TYPE structure. If
-        you need to use comma or space inside type definition you will have
-        to backslash them.
-
-                MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
-
-        Type of table1.col3 will be replaced by a varchar and table1.col4 by
-        a decimal with precision.
-
   Taking export under control
     The following other configuration directives interact directly with the
-    export process and give you fine granularity in database export control.
+    export process and give you fine granuality in database export control.
 
     SKIP
         For TABLE export you may not want to export all schema constraints,
@@ -1665,7 +1456,7 @@
         schema export (TABLE export type).
 
     DROP_FKEY
-        If deferring foreign keys is not possible due to the amount of data
+        If deferring foreign keys is not possible du to the amount of data
         in a single transaction, you've not exported foreign keys as
         deferrable or you are using direct import to PostgreSQL, you can use
         the DROP_FKEY directive.
@@ -1712,25 +1503,120 @@
         used during data export to build INSERT statements. See NOESCAPE for
         enabling/disabling escape in COPY statements.
 
+    PG_NUMERIC_TYPE
+        If set to 1 replace portable numeric type into PostgreSQL internal
+        type. Oracle data type NUMBER(p,s) is approximatively converted to
+        real and float PostgreSQL data type. If you have monetary fields or
+        don't want rounding issues with the extra decimals you should
+        preserve the same numeric(p,s) PostgreSQL data type. Do that only if
+        you need very good precision because using numeric(p,s) is slower
+        than using real or double.
+
+    PG_INTEGER_TYPE
+        If set to 1 replace portable numeric type into PostgreSQL internal
+        type. Oracle data type NUMBER(p) or NUMBER are converted to
+        smallint, integer or bigint PostgreSQL data type following the
+        length of the precision. If NUMBER without precision are set to
+        DEFAULT_NUMERIC (see bellow).
+
+    DEFAULT_NUMERIC
+        NUMBER without precision are converted by default to bigint only if
+        PG_INTEGER_TYPE is true. You can overwrite this value to any PG
+        type, like integer or float.
+
+    DATA_TYPE
+        If you're experiencing any problem in data type schema conversion
+        with this directive you can take full control of the correspondence
+        between Oracle and PostgreSQL types to redefine data type
+        translation used in Ora2pg. The syntax is a comma-separated list of
+        "Oracle datatype:Postgresql datatype". Here are the default list
+        used:
+
+                DATA_TYPE       DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
+
+        Note that the directive and the list definition must be a single
+        line.
+
+        There's a special case with BFILE when they are converted to type
+        TEXT, they will just contains the full path to the external file. If
+        you set the destination type to BYTEA, the default, Ora2Pg will
+        export the content of the BFILE as bytea. The third case is when you
+        set the destination type to EFILE, in this case, Ora2Pg will export
+        it as an EFILE record: (DIRECTORY, FILENAME). Use the DIRECTORY
+        export type to export the existing directories as well as priviledge
+        on those directories.
+
+        There's no SQL function available to retrieve the path to the BFILE,
+        then Ora2Pg have to create one using the DBMS_LOB package.
+
+                CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
+                RETURN VARCHAR2
+                  AS
+                    l_dir   VARCHAR2(4000);
+                    l_fname VARCHAR2(4000);
+                    l_path  VARCHAR2(4000);
+                  BEGIN
+                    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+                    SELECT directory_path INTO l_path FROM all_directories
+                        WHERE directory_name = l_dir;
+                    l_dir := rtrim(l_path,'/');
+                    RETURN l_dir || '/' || l_fname;
+                  END;
+
+        This function is only created if Ora2Pg found a table with a BFILE
+        column and that the destination type is TEXT. The function is
+        dropped at the end of the export. This concern both, COPY and INSERT
+        export type.
+
+        There's no SQL function available to retrieve BFILE as an EFILE
+        record, then Ora2Pg have to create one using the DBMS_LOB package.
+
+                CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
+                RETURN VARCHAR2
+                  AS
+                    l_dir   VARCHAR2(4000);
+                    l_fname VARCHAR2(4000);
+                  BEGIN
+                    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+                    RETURN '(' || l_dir || ',' || l_fnamei || ')';
+                  END;
+
+        This function is only created if Ora2Pg found a table with a BFILE
+        column and that the destination type is EFILE. The function is
+        dropped at the end of the export. This concern both, COPY and INSERT
+        export type.
+
+        To set the destination type, use the DATA_TYPE configuration
+        directive:
+
+                DATA_TYPE       BFILE:EFILE
+
+        for example.
+
+        The EFILE type is a user defined type created by the PostgreSQL
+        extension external_file that can be found here:
+        https://github.com/darold/external_file This is a port of the BFILE
+        Oracle type to PostgreSQL.
+
     TRIM_TYPE
         If you want to convert CHAR(n) from Oracle into varchar(n) or text
         on PostgreSQL using directive DATA_TYPE, you might want to do some
         triming on the data. By default Ora2Pg will auto-detect this
-        conversion and remove any whitespace at both leading and trailing
+        conversion and remove any withspace at both leading and trailing
         position. If you just want to remove the leadings character set the
         value to LEADING. If you just want to remove the trailing character,
         set the value to TRAILING. Default value is BOTH.
 
     TRIM_CHAR
-        The default trimming character is space, use this directive if you
+        The default triming character is space, use this directive if you
         need to change the character that will be removed. For example, set
         it to - if you have leading - in the char(n) field. To use space as
-        trimming charger, comment this directive, this is the default value.
+        triming charger, comment this directive, this is the default value.
 
     PRESERVE_CASE
         If you want to preserve the case of Oracle object name set this
         directive to 1. By default Ora2Pg will convert all Oracle object
-        names to lower case. I do not recommend to enable this unless you
+        names to lower case. I do not recommand to enable this unless you
         will always have to double-quote object names on all your SQL
         scripts.
 
@@ -1757,7 +1643,7 @@
     PG_SUPPORTS_IFEXISTS
         PostgreSQL version below 9.x do not support IF EXISTS in DDL
         statements. Disabling the directive with value 0 will prevent Ora2Pg
-        to add those keywords in all generated statements. Default value is
+        to add those keywords in all generated statments. Default value is
         1, enabled.
 
     PG_SUPPORTS_ROLE (Deprecated)
@@ -1799,7 +1685,7 @@
         by default.
 
     BITMAP_AS_GIN
-        Use btree_gin extension to create bitmap like index with pg >= 9.4
+        Use btree_gin extenstion to create bitmap like index with pg >= 9.4
         You will need to create the extension by yourself: create extension
         btree_gin; Default is to create GIN index, when disabled, a btree
         index will be created
@@ -1837,7 +1723,7 @@
 
     NO_LOB_LOCATOR
         Disable this if you don't want to load full content of BLOB and CLOB
-        and use LOB locators instead. This is useful to not having to set
+        and use LOB locators instead. This is usefull to not having to set
         LONGREADLEN. Note that this will not improve speed of BLOB export as
         most of the time is always consumed by the bytea escaping and in
         this case data will be processed line by line and not by chunk of
@@ -1917,7 +1803,7 @@
         http://www.postgresql.org/docs/9.0/static/multibyte.html
 
   PLSQL to PLPSQL convertion
-    Automatic code conversion from Oracle PLSQL to PostgreSQL PLPGSQL is a
+    Automatic code convertion from Oracle PLSQL to PostgreSQL PLPGSQL is a
     work in progress in Ora2Pg and surely you will always have manual work.
     The Perl code used for automatic conversion is all stored in a specific
     Perl Module named Ora2Pg/PLSQL.pm feel free to modify/add you own code
@@ -1925,7 +1811,7 @@
     and package body headers and parameters rewrite.
 
     PLSQL_PGSQL
-        Enable/disable PLSQL to PLPSQL conversion. Enabled by default.
+        Enable/disable PLSQL to PLPSQL convertion. Enabled by default.
 
     NULL_EQUAL_EMPTY
         Ora2Pg can replace all conditions with a test on NULL by a call to
@@ -1959,19 +1845,6 @@
         parsed by the PLSQL to PLPGSQL converter. PLSQL_PGSQL must be
         enabled or -p used in command line.
 
-    REWRITE_OUTER_JOIN
-        Enable this directive if the rewrite of Oracle native syntax (+) of
-        OUTER JOIN is broken. This will force Ora2Pg to not rewrite such
-        code, default is to try to rewrite simple form of rigth outer join
-        for the moment.
-
-    UUID_FUNCTION
-        By default Ora2Pg will convert call to SYS_GUID() Oracle function
-        with a call to uuid_generate_v4 from uuid-ossp extension. You can
-        redefined it to use the gen_random_uuid function from pgcrypto
-        extension by changing the function name. Default to
-        uuid_generate_v4.
-
   Materialized view
     Since PostgreSQL 9.3, materialized view are supported with the CREATE
     MATERIALIZED VIEW syntax, to force Ora2Pg to use the native PostgreSQL
@@ -2003,13 +1876,13 @@
             CREATE VIEW mviewname_mview AS
             SELECT ... FROM ...;
 
-            SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the column to used for the index);
+            SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the colum to used for the index);
 
-    The first argument is the name of the materialized view, the second the
+    The first argument is the name of the materializd view, the second the
     name of the view on which the materialized view is based and the third
     is the column name on which the index should be build (aka most od the
     time the primary key). This column is not automatically deduced so you
-    need to replace its name.
+    need to repace its name.
 
     As said above Ora2Pg only supports snapshot materialized views so the
     table will be entirely refreshed by issuing first a truncate of the
@@ -2269,11 +2142,11 @@
             5 = difficult: stored functions and/or triggers with code rewriting
 
     This assessment consist in a letter A or B to specify if the migration
-    needs manual rewriting or not. And a number from 1 up to 5 to give you a
-    technical difficulty level. You have an additional option
+    needs manual rewritting or not. And a number from 1 up to 5 to give you
+    a technical difficulty level. You have an additional option
     --human_days_limit to specify the number of human-days limit where the
     migration level should be set to C to indicate that it need a huge
-    amount of work and a full project management with migration support.
+    amount of work and a full project managment with migration support.
     Default is 10 human-days. You can use the configuration directive
     HUMAN_DAYS_LIMIT to change this default value permanently.
 
@@ -2311,7 +2184,8 @@
        The CSV field separator must be a comma.
 
     It will generate a CSV file with the assessment result, one line per
-    schema or database and a detailed HTML report for each database scanned.
+    schema or database and a detailled HTML report for each database
+    scanned.
 
     Hint: Use the -t | --test option before to test all your connections in
     your CSV file.
@@ -2329,7 +2203,7 @@
     in the hash %UNCOVERED_SCORE initialization.
 
     This assessment method is a work in progress so I'm expecting feedbacks
-    on migration experiences to polish the scores/units attributed in those
+    on migration experiences to polish the scores/units attribued in those
     variables.
 
   Improving indexes and constraints creation speed
@@ -2361,7 +2235,7 @@
 
     need to be "translated" into a table using BLOB as follow:
 
-            CREATE TABLE test_blob (id NUMBER, c1 BLOB);
+            CREATE TABLE test_blob (id NUMNER, c1 BLOB);
 
     And then copy the data with the following INSERT query:
 
@@ -2371,27 +2245,6 @@
     EXCLUDE directive) and to renamed the new temporary table on the fly
     using the REPLACE_TABLES configuration directive.
 
-  Global variables
-    Oracle allow the use of global variables defined in packages. Ora2Pg
-    will export these variables for PostgreSQL as user defined custom
-    variables available in a session. Oracle variables assignement are
-    exported as call to:
-
-        PERFORM set_config('pkgname.varname', value, false);
-
-    Use of these variables in the code is replaced by:
-
-        current_setting('pkgname.varname')::global_variables_type;
-
-    where global_variables_type is the type of the variable extracted from
-    the package definition.
-
-    If the variable is a constant or have a default value assigned at
-    declaration, ora2pg will create a file global_variables.conf with the
-    definition to include in the postgresql.conf file so that their values
-    will already be set at database connection. Note that the value can
-    always modified by the user so you can not have exactly a constant.
-
   Hints
     Converting your queries with Oracle style outer join (+) syntax to ANSI
     standard SQL at the Oracle side can save you lot of time for the
@@ -2515,7 +2368,7 @@
     applied.
 
 LICENSE
-    Copyright (c) 2000-2017 Gilles Darold - All rights reserved.
+    Copyright (c) 2000-2016 Gilles Darold - All rights reserved.
 
             This program is free software: you can redistribute it and/or modify
             it under the terms of the GNU General Public License as published by
@@ -2532,5 +2385,5 @@
 
 ACKNOWLEDGEMENT
     I must thanks a lot all the great contributors, see changelog for all
-    acknowledgments.
+    acknowledgements.
 
diff -Nru ora2pg-18.0/changelog ora2pg-17.6/changelog
--- ora2pg-18.0/changelog	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/changelog	2016-11-18 05:45:49.000000000 +0800
@@ -1,266 +1,3 @@
-2017 01 29 - v18.0
-
-This new major release adds several new useful features and lot of
-improvements.
-
-  * Automatic rewrite of simple form of (+) outer join Oracle's
-    syntax. This major feature makes Ora2Pg become the first free
-    tool that is able to rewrite automatically (+) outer join in
-    command line mode. This works with simple form of outer join
-    but this is a beginning.
-  * Add export of Oracle's virtual column using a real column and
-    a trigger.
-  * Allow conversion of RAW/CHAR/VARCHAR2 type with precision in
-    DATA_TYPE directive. Useful for example to transform all RAW(32)
-    or VARCHAR2(32) columns into PostgreSQL special type uuid.
-  * Add export NOT VALIDATED state from Oracle foreign keys and check
-    constraints into NOT VALID constraints in PostgreSQL.
-  * Replace call to SYS_GUID() with uuid_generate_v4() by default.
-  * Add "CREATE EXTENSION IF NOT EXISTS dblink;" before an autonomous
-    transaction or "CREATE EXTENSION IF NOT EXISTS pg_background;".
-  * Major rewrite of the way Ora2Pg parse PL/SQL to rewrite function
-    calls and other PL/SQL to plpgsql replacement. There should not
-    be any limitation in rewriting when a function contains a sub
-    query or an other function call inside his parameters.
-  * Refactoring of ora2pg to not requires any dependency other than
-    the Perl DBI module by default. All DBD drivers are now optionals
-    and ora2pg will expect an Oracle DDL file as input by default.
-  * Add export of Oracle's global variables defined in package. They
-    are exported as user defined custom variables and available in
-    a session. If the variable is a constant or have a default value
-    assigned at declaration, ora2pg will create a new file with the
-    declaration (global_variables.conf) to be included in the main
-    configuration file postgresql.conf file.
-  * Create full text search configuration when USE_UNACCENT directive
-    is enabled using the auto detected stemmer or the one defined in
-    FTS_CONFIG. For example:
-  	CREATE TEXT SEARCH CONFIGURATION fr (COPY = french);
-    	ALTER TEXT SEARCH CONFIGURATION fr ALTER MAPPING FOR
-    		hword, hword_part, word WITH unaccent, french_stem;
-    	CREATE INDEX place_notes_cidx ON places
-    			USING gin(to_tsvector('fr', place_notes));
-    
-Changes and incompatibilities from previous release:
-
-  * FTS_INDEX_ONLY is now enabled by default because the addition of
-    a column is not always possible and not always necessary where a
-    simple function-based index is enough.
-  * Remove use to setweigth() on single column FTS based indexes.
-  * Change default behaviour of Ora2Pg in Full Text Search index
-    export.
-
-A new command line option and some configuration directive have
-been added:
-
-  * Option -D | --data_type to allow custom data type replacement
-    at command line like in configuration file with DATA_TYPE.
-  * UUID_FUNCTION to be able to redefined the function called to
-    replace SYS_GUID() Oracle function. Default to uuid_generate_v4.
-  * REWRITE_OUTER_JOIN to be able to disable the rewriting of Oracle
-    native syntax (+) into OUTER JOIN if rewritten code is broken.
-  * USE_UNACCENT and USE_LOWER_UNACCENT configuration directives to
-    use the unaccent extension with pg_trgm with the FTS indexes.
-  * FTS_INDEX_ONLY, by default Ora2Pg creates an extra tsvector column
-    with a dedicated triggers for FTS indexes. Enable this directive
-    if you just want a function-based index like:
-	CREATE INDEX ON t_document USING
-		gin(to_tsvector('pg_catalog.english', title));
-  * FTS_CONFIG, use this directive to force the text search stemmer
-    used with the to_tsvector() function. Default is to auto detect
-    the Oracle FTS stemmer. For example, setting FTS_CONFIG to
-    pg_catalog.english or pg_catalog.french will override the auto
-    detected stemmer.
-
-There's also lot fixes of issues reported by users from the past two
-months, here is the complete list of changes:
-
-  - Fix return type in function with a single inout parameter and a
-    returned type.
-  - Prevent wrong rewrite of empty as null when a function is used.
-    Thanks to Pavel Stehule for the report.
-  - Add the UUID_FUNCTION configuration directive. By default Ora2Pg
-    will convert call to SYS_GUID() Oracle function with a call to
-    uuid_generate_v4 from uuid-ossp extension. You can redefined it
-    to use the gen_random_uuid function from pgcrypto extension by
-    changing the function name. Default to uuid_generate_v4. Thanks
-    to sjimmerson for the feature request.
-  - Add rewrite of queries with simple form of left outer join syntax
-    (+) into the ansi form.
-  - Add new command line option -D | --data_type to allow custom data
-    type replacement at command line like in configuration file with
-    DATA_TYPE.
-  - Fix type in ROWNUM replacement expression. Thanks to Pavel Stehule
-    for the report.
-  - Add replacement of SYS_GUID by uuid_generate_v4 and allow custom
-    rewriting of RAW type. Thanks to Nicolas Martin for the report.
-  - Fix missing WHERE clause in ROWNUM replacement with previous patch
-    thanks to Pavel Stehule for the report.
-  - Fix ROWNUM replacement when e sub select is used. Thanks to Pavel
-    Stehule for the report.
-  - Fix wrong syntax in index creation with DROP_INDEXES enabled.
-    Thanks to Pave Stehule for the report.
-  - Remove replacement of substr() by substring() as PostgreSQL have
-    the substr() function too. Thanks to Pavel Stehule for the report.
-  - Move LIMIT replacement for ROWNUM to the end of the query. Thanks
-    to Pavel Stehule for the report.
-  - Fix text default value between parenthesis in table declaration.
-    Thanks to Pavel Stehule for the report.
-  - Fix return type when a function has an IN/OUT parameter. Thanks to
-    Pavel Stehule for the report.
-  - Mark uuid type to be exported as text. Thanks to sjimmerson for
-    the report.
-  - Add EXECUTE to open cursor statements like "OPEN var1 FOR var2;".
-    Thanks to Pavel Stehule for the report.
-  - Fix replacement of local type ref cursor. Thanks to Pavel Stehule
-    for the report.
-  - Add EXECUTE keyword to OPEN CURSOR ... FOR with dynamic query.
-    Thanks to Pavel Stehule for the report.
-  - Fix case sensitivity issue in FOR .. IN variable declaration
-    replacement. Thanks to Pavel Stehule for the report.
-  - Fix wrong replacement of cast syntax ::. Thanks to Pavel Stehule
-    for the report.
-  - Reactivate numeric cast in call to round(value,n).
-  - Close main output data file at end of export.
-  - Add virtual column state in the column info report, a first stage
-    toward exporting those columns as regular columns with an
-    associated trigger.
-  - Fix unwanted replacement of REGEXP_INSTR. Thanks to Bernard
-    Bielecki for the report.
-  - Allow rewrite of NUMBER(*, 0) into bigint or another type instead
-    of numeric(38), just set DATA_TYPE to NUMBER(*\,0):bigint. Thanks to
-    kuzmaka for the feature request.
-  - Export partition indexes into a separate file named
-    PARTITION_INDEXES_....sql. Thanks to Nicolas Martin for the feature
-    request.
-  - Fix fatal error when schema CTXSYS does not exist. Thanks to
-    Bernard Bielecki for the report.
-  - Fix missing text value replacement. Thanks to Bernard Bielecki
-    for the report.
-  - Fix type replacement in declare section when the keyword END was
-    present in a variable name.
-  - Export NOT VALIDATED Oracle foreign keys and check constraints as
-    NOT VALID in PostgreSQL. Thanks to Alexey for the feature request.
-  - Add object matching of regex 'SYS_.*\$' to the default exclusion
-    list.
-  - Fix UTF8 output to file as the open pragma "use open ':utf8';"
-    doesn't work in a global context. binmode(':encoding(...)') is
-    used on each file descriptor for data output.
-  - Improve parsing of tables/indexes/constraints/tablespaces DDL from
-    file.
-  - Improve parsing of sequences DDL from file.
-  - Improve parsing of user defined types DDL from file.
-  - Export Oracle's TYPE REF CURSOR with a warning as not supported.
-  - Replace call to plsql_to_plpgsql() in Ora2Pg.pm by a call to new
-    function convert_plsql_code().
-  - Move export of constraints after indexes to be able to use USING
-    index in constraint creation without an error complaining that the
-    index does not exist.
-  - Add "CREATE EXTENSION IF NOT EXISTS dblink;" before an autonomous
-    transaction or "CREATE EXTENSION IF NOT EXISTS pg_background;".
-  - Improve parsing of packages DDL from file.
-  - When a variable in "FOR varname IN" statement is not found in the
-    DECLARE block, Ora2Pg will automatically add the variable to this
-    block, declared as a RECORD. Thanks to Pavel Stehule for the report.
-  - Major rewrite of the way Ora2Pg parses PL/SQL to rewrite function
-    calls and other PL/SQL to plpgsql replacements. There should no
-    longer be limitations in rewriting when a function contains a sub
-    query or another function call inside its parameters.
-  - Fix unwanted SELECT to PERFORM transformation inside literal
-    strings. Thanks to Pavel Stehule for the report.
-  - Fix bug in DEFAULT value rewriting. Thanks to Pavel Stehule for
-    the report.
-  - Fix replacement of DBMS_OUTPUT.put_line with RAISE NOTICE.
-  - Reset global variable storage for each package.
-  - Improve comment parsing in packages and prevent possible infinite
-    loop in global variable replacement.
-  - Add the REWRITE_OUTER_JOIN configuration directive to be able to
-    disable the rewriting of Oracle native syntax (+) into OUTER JOIN
-    if it is broken. Default is to try to rewrite simple forms of
-    right outer join for the moment.
-  - Export types and cursors declared as global objects in package
-    spec header into the main output file for package export. Types
-    and cursors declared into the package body are exported into the
-    output file of the first function declared in this package.
-  - Global variables declared in the package spec header are now
-    identified and replaced in the package code with a call to a
-    user defined custom variable. It works just like global variables
-    declared in the package body.
-  - Add auto detection of Oracle FTS stemmer and disable FTS_CONFIG
-    configuration directive by default. When FTS_CONFIG is set, its
-    value will overwrite the auto detected value.
-  - Create full text search configuration when USE_UNACCENT directive
-    is enabled using the auto detected stemmer or the one defined in
-    FTS_CONFIG. For example:
-  	CREATE TEXT SEARCH CONFIGURATION fr (COPY = french);
-    	ALTER TEXT SEARCH CONFIGURATION fr ALTER MAPPING FOR
-    		hword, hword_part, word WITH unaccent, french_stem;
-    	CREATE INDEX place_notes_cidx ON places
-    			USING gin(to_tsvector('fr', place_notes));
-  - Remove CONTAINS(ABOUT()) from the migration assessment, there is no
-    additional difficulty compared to the CONTAINS rewrite.
-  - Add ANYDATA to the migration assessment keywords to detect.
-  - Allow conversion of CHAR/VARCHAR2 types with precision in the
-    DATA_TYPE directive. For example it's possible to transform only
-    VARCHAR2(32) columns into the PostgreSQL uuid type by setting:
-        DATA_TYPE	VARCHAR2(32):uuid
-    Thanks to sjimmerson for the feature request.
-  - Update year in copyrights
-  - Fix creation of schema when CREATE_SCHEMA+PG_SCHEMA are defined.
-  - Fix renaming of temporary file when exporting partitions.
-  - Move MODIFY_TYPE to the type section
-  - Update documentation about globals variables.
-  - Add export of Oracle's global variables defined in package. They
-    are exported as user defined custom variables and available in
-    a session. Oracle variable assignments are exported as calls to:
-            PERFORM set_config('pkgname.varname', value, false);
-    Use of these variables in the code is replaced by:
-            current_setting('pkgname.varname')::global_variables_type;
-    the variable type is extracted from the package definition. If
-    the variable is a constant or has a default value assigned at
-    declaration, ora2pg will create file global_variables.conf with
-    the definition to include in postgresql.conf file so that their
-    values will already be set at database connection. Note that the
-    value can always be modified by the user, so it is not exactly
-    a constant.
-  - Fix migration assessment of views.
-  - Remove calls to FROM SYS.DUAL; previously only FROM DUAL was
-    replaced.
-  - Replace calls to trim() with btrim().
-  - Improve rewrite of DECODE when there is a function call inside.
-  - Add function replace_right_outer_join() to rewrite Oracle (+)
-    right outer join.
-  - Improve view migration assessment.
-  - Create a FTS section in the configuration file dedicated to FTS
-    control.
-  - Add USE_UNACCENT and USE_LOWER_UNACCENT configuration directives
-    to use the unaccent extension with pg_trgm.
-  - Do not create the FTS_INDEXES_* file when there are no Oracle Text
-    indexes.
-  - Update query test score when CONTAINS, SCORE, FUZZY, ABOUT, NEAR
-    keywords are found.
-  - Remove use of setweight() on single-column FTS indexes.
-    Thanks to Adrien Nayrat for the report.
-  - Update documentation on FTS_INDEX_ONLY with full explanation on
-    the Ora2Pg transformation.
-  - Refactor ora2pg to not require any dependency other than the
-    Perl DBI module by default. All DBD drivers are now optional and
-    ora2pg will expect to receive an Oracle DDL file as input by
-    default. This makes packaging easier for any distribution that
-    cannot build a package because of the DBD::Oracle requirement.
-    DBD::Oracle, DBD::MySQL and DBD::Pg are still required if you want
-    Ora2Pg to migrate your database "on-line" but they are optional
-    because Ora2Pg can also convert input DDL file, this is the
-    default now. Thanks to Gustavo Panizzo for the feature request and
-    the work on Debian packaging.
-  - Remove String::Random dependency in the rpm spec file, it is not
-    used even though it was mentioned in a comment.
-  - Exclude internal Oracle Streams AQ JMS types from the export.
-    Thanks to Joanna Xu for the report.
-  - Fix some other spelling issues. Thanks to Gustavo Panizzo for the
-    patch.
-  - Fix some spelling errors. Thanks to Gustavo Panizzo for the patch.
-  - Revert patch 697f09d that was breaking encoding with input file
-    (-i). Thanks to Gary Evans for the report.
-  - Add two new configuration directives to control FTS settings,
-    FTS_INDEX_ONLY and FTS_CONFIG.
-   
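To make a few of the rewrites listed above concrete, here is an
illustrative before/after sketch; the emp/dept tables and their columns
are hypothetical and the exact output depends on the version and
configuration:

	-- Oracle source
	SELECT SYS_GUID(), e.ename, d.dname
	  FROM emp e, dept d
	 WHERE e.deptno = d.deptno (+)
	   AND ROWNUM <= 10;

	-- expected PostgreSQL form after conversion
	SELECT uuid_generate_v4(), e.ename, d.dname
	  FROM emp e
	  LEFT OUTER JOIN dept d ON (e.deptno = d.deptno)
	 LIMIT 10;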
 2016 11 17 - v17.6
 
 This release adds several new features:
@@ -531,7 +268,7 @@
   - Fix debug mode that was interromping the last running table dump.
     Thanks to calbiston for the report.
 
-2016 04 21 - v17.4
+2016 04 21 - v17.6
 
 Errata in first release attempt.
 
diff -Nru ora2pg-18.0/debian/changelog ora2pg-17.6/debian/changelog
--- ora2pg-18.0/debian/changelog	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/changelog	2016-12-04 17:13:21.000000000 +0800
@@ -1,18 +1,3 @@
-ora2pg (18.0-1) experimental; urgency=medium
-
-  * QA upload
-  * New upstream release
-  * Patches refreshed
-    - 01_Ora2Pg.pod.diff
-    - 02_remove_unnecessary_files.diff
-  * Patches dropped, applied upstream
-    - cf81287191c1560b238fe2967560d8bff33e24b0.patch
-    - spelling-fixes.patch
-  * Moved out from contrib, as ora2pg now supports both Oracle and MySQL
-  * Update copyright years
-
- -- gustavo panizzo <gfa@zumbi.com.ar>  Wed, 01 Feb 2017 10:33:18 +0800
-
 ora2pg (17.6-1) unstable; urgency=medium
 
   * QA upload
diff -Nru ora2pg-18.0/debian/control ora2pg-17.6/debian/control
--- ora2pg-18.0/debian/control	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/control	2016-12-04 17:13:21.000000000 +0800
@@ -1,6 +1,6 @@
 Source: ora2pg
 Maintainer: Debian QA Group <packages@qa.debian.org>
-Section: database
+Section: contrib/database
 Priority: extra
 Build-Depends: debhelper (>= 10)
 Build-Depends-Indep: perl
@@ -11,13 +11,13 @@
 
 Package: ora2pg
 Architecture: all
-Depends: libdbd-mysql-perl | libdbd-oracle-perl,
+Depends: libdbd-oracle-perl,
          libdbd-pg-perl,
          libdbi-perl,
          libio-compress-perl,
          ${misc:Depends},
          ${perl:Depends}
-Recommends: postgresql-client
+Recommends: postgresql-client, libdbd-mysql-perl
 Description: Oracle/MySQL to PostgreSQL database schema converter
  This package contains a Perl module and a companion script to convert an
  Oracle or MySQL database schema, data and stored procedures to a
diff -Nru ora2pg-18.0/debian/copyright ora2pg-17.6/debian/copyright
--- ora2pg-18.0/debian/copyright	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/copyright	2016-12-01 17:56:37.000000000 +0800
@@ -4,7 +4,7 @@
 Source: http://sourceforge.net/projects/ora2pg/
 
 Files: *
-Copyright: 2000-2017 Gilles Darold <gilles@darold.net>
+Copyright: 2000-2016 Gilles Darold <gilles@darold.net>
 License: GPL-3+
 
 Files: debian/*
diff -Nru ora2pg-18.0/debian/patches/01_Ora2Pg.pod.diff ora2pg-17.6/debian/patches/01_Ora2Pg.pod.diff
--- ora2pg-18.0/debian/patches/01_Ora2Pg.pod.diff	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/patches/01_Ora2Pg.pod.diff	2016-12-01 17:50:37.000000000 +0800
@@ -4,7 +4,7 @@
 Last-Update: 2011-01-25
 --- a/doc/Ora2Pg.pod
 +++ b/doc/Ora2Pg.pod
-@@ -127,7 +127,7 @@ Like any other Perl Module Ora2Pg can be
+@@ -123,7 +123,7 @@ Like any other Perl Module Ora2Pg can be
  	make && make install
  
  This will install Ora2Pg.pm into your site Perl repository, ora2pg into
@@ -13,7 +13,7 @@
  
  On Windows(tm) OSes you may use instead:
  
-@@ -213,7 +213,7 @@ file.
+@@ -209,7 +209,7 @@ file.
  By default Ora2Pg will look for /etc/ora2pg/ora2pg.conf configuration file, if
  the file exist you can simply execute:
  
@@ -22,7 +22,7 @@
  
  or under Windows(tm) run ora2pg.bat file, located in your perl bin directory.
  Windows(tm) users may also find a template configuration file in C:\ora2pg
-@@ -221,7 +221,7 @@ Windows(tm) users may also find a templa
+@@ -217,7 +217,7 @@ Windows(tm) users may also find a templa
  If you want to call another configuration file, just give the path as command
  line argument:
  
@@ -31,7 +31,7 @@
  
  Here are all command line parameters available when using ora2pg:
  
-@@ -605,7 +605,7 @@ will add users INTERNAL and SYSDBA to th
+@@ -600,7 +600,7 @@ will add users INTERNAL and SYSDBA to th
  =item FORCE_OWNER
  
  By default the owner of the database objects is the one you're using to connect
diff -Nru ora2pg-18.0/debian/patches/02_remove_unnecessary_files.diff ora2pg-17.6/debian/patches/02_remove_unnecessary_files.diff
--- ora2pg-18.0/debian/patches/02_remove_unnecessary_files.diff	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/patches/02_remove_unnecessary_files.diff	2016-12-02 11:16:21.000000000 +0800
@@ -13,7 +13,7 @@
  if ($^O =~ /MSWin32|dos/i) {
  	$DEST_CONF_FILE = 'ora2pg_dist.conf';
  }
-@@ -1006,20 +1006,6 @@ MYSQL_INTERNAL_EXTRACT_FORMAT	0
+@@ -902,20 +902,6 @@ MYSQL_INTERNAL_EXTRACT_FORMAT	0
  };
  close(OUTCFG);
  
@@ -34,7 +34,7 @@
  WriteMakefile(
      'NAME'         => 'Ora2Pg',
      'VERSION_FROM' => 'lib/Ora2Pg.pm',
-@@ -1031,7 +1017,7 @@ WriteMakefile(
+@@ -927,7 +913,7 @@ WriteMakefile(
      'AUTHOR'       => 'Gilles Darold (gilles _AT_ darold _DOT_ net)',
      'ABSTRACT'     => 'Oracle to PostgreSQL migration toolkit',
      'EXE_FILES'    => [ qw(scripts/ora2pg scripts/ora2pg_scanner) ],
@@ -43,7 +43,7 @@
      'DESTDIR'      => $PREFIX,
      'INSTALLDIRS'  => $ENV{INSTALLDIRS},
      'clean'        => {FILES => "$DEST_CONF_FILE lib/blib/"},
-@@ -1065,7 +1051,6 @@ install_all :
+@@ -961,7 +947,6 @@ install_all :
  	\@\$(CP) -f $DEST_CONF_FILE $CONFDIR/$DEST_CONF_FILE
  	\@\$(MKPATH) $DOCDIR
  	\@\$(CP) -f README $DOCDIR/README
diff -Nru ora2pg-18.0/debian/patches/cf81287191c1560b238fe2967560d8bff33e24b0.patch ora2pg-17.6/debian/patches/cf81287191c1560b238fe2967560d8bff33e24b0.patch
--- ora2pg-18.0/debian/patches/cf81287191c1560b238fe2967560d8bff33e24b0.patch	1970-01-01 08:00:00.000000000 +0800
+++ ora2pg-17.6/debian/patches/cf81287191c1560b238fe2967560d8bff33e24b0.patch	2016-12-01 17:50:37.000000000 +0800
@@ -0,0 +1,88 @@
+From cf81287191c1560b238fe2967560d8bff33e24b0 Mon Sep 17 00:00:00 2001
+From: Gilles Darold <gilles.darold@dalibo.com>
+Date: Tue, 22 Nov 2016 23:31:44 +0100
+Subject: [PATCH] Revert patch 697f09d that was breaking encoding with input
+ file (-i). Thanks to Gary Evans for the report.
+
+---
+ lib/Ora2Pg.pm | 39 ++++++++++-----------------------------
+ 1 file changed, 10 insertions(+), 29 deletions(-)
+
+--- a/lib/Ora2Pg.pm
++++ b/lib/Ora2Pg.pm
+@@ -460,13 +460,6 @@ sub open_export_file
+ 		} else {
+ 			$filehdl = new IO::File;
+ 			$filehdl->open(">$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
+-			# Force Perl to use utf8 I/O encoding by default
+-			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+-				use open ':utf8';
+-				$filehdl->binmode(':utf8');
+-			} elsif ($self->{'binmode'} =~ /^:/) {
+-				$filehdl->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+-			}
+ 		}
+ 		$filehdl->autoflush(1) if (defined $filehdl && !$self->{compress});
+ 	}
+@@ -507,13 +500,6 @@ sub create_export_file
+ 		} else {
+ 			$self->{fhout} = new IO::File;
+ 			$self->{fhout}->open(">>$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
+-			# Force Perl to use utf8 I/O encoding by default
+-			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+-				use open ':utf8';
+-				$self->{fhout}->binmode(':utf8');
+-			} elsif ($self->{'binmode'} =~ /^:/) {
+-				$self->{fhout}->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+-			}
+ 		}
+ 		if ( $self->{compress} && (($self->{jobs} > 1) || ($self->{oracle_copies} > 1)) ) {
+ 			die "FATAL: you can't use compressed output with parallel dump\n";
+@@ -564,13 +550,6 @@ sub append_export_file
+ 			$filehdl = new IO::File;
+ 			$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
+ 			$filehdl->autoflush(1);
+-			# Force Perl to use utf8 I/O encoding by default
+-			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+-				use open ':utf8';
+-				$filehdl->binmode(':utf8');
+-			} elsif ($self->{'binmode'} =~ /^:/) {
+-				$filehdl->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+-			}
+ 		}
+ 	}
+ 
+@@ -10920,13 +10899,14 @@ sub log_error_copy
+ 	}
+ 	$outfile .= $table . '_error.log';
+ 
+-	open(OUTERROR, ">>$outfile") or $self->logit("FATAL: can not write to $outfile, $!\n", 0, 1);
+-	print OUTERROR "$s_out";
++	my $filehdl = new IO::File;
++	$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't write to $outfile: $!\n", 0, 1);
++	$filehdl->print($s_out);
+ 	foreach my $row (@$rows) {
+-		print OUTERROR join("\t", @$row), "\n";
++		$filehdl->print(join("\t", @$row) . "\n");
+ 	}
+-	print OUTERROR "\\.\n";
+-	close(OUTERROR);
++	$filehdl->print("\\.\n");
++	$filehdl->close();
+ 
+ }
+ 
+@@ -10940,9 +10920,10 @@ sub log_error_insert
+ 	}
+ 	$outfile .= $table . '_error.log';
+ 
+-	open(OUTERROR, ">>$outfile") or $self->logit("FATAL: can not write to $outfile, $!\n", 0, 1);
+-	print OUTERROR "$sql_out\n";
+-	close(OUTERROR);
++	my $filehdl = new IO::File;
++	$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't write to $outfile: $!\n", 0, 1);
++	$filehdl->print("$sql_out\n");
++	$filehdl->close();
+ 
+ }
+ 
diff -Nru ora2pg-18.0/debian/patches/series ora2pg-17.6/debian/patches/series
--- ora2pg-18.0/debian/patches/series	2017-02-01 10:33:18.000000000 +0800
+++ ora2pg-17.6/debian/patches/series	2016-12-01 17:50:37.000000000 +0800
@@ -1,2 +1,4 @@
 01_Ora2Pg.pod.diff
 02_remove_unnecessary_files.diff
+cf81287191c1560b238fe2967560d8bff33e24b0.patch
+spelling-fixes.patch
diff -Nru ora2pg-18.0/debian/patches/spelling-fixes.patch ora2pg-17.6/debian/patches/spelling-fixes.patch
--- ora2pg-18.0/debian/patches/spelling-fixes.patch	1970-01-01 08:00:00.000000000 +0800
+++ ora2pg-17.6/debian/patches/spelling-fixes.patch	2016-12-01 17:50:37.000000000 +0800
@@ -0,0 +1,520 @@
+Description: Fix spelling
+Forwarded: https://github.com/darold/ora2pg/issues/242
+Author: gustavo panizzo <gfa@zumbi.com.ar>
+Last-Update: 2016-12-01
+--- a/doc/Ora2Pg.pod
++++ b/doc/Ora2Pg.pod
+@@ -21,7 +21,7 @@ database.
+ 
+ Ora2Pg consist of a Perl script (ora2pg) and a Perl module (Ora2Pg.pm), the
+ only thing you have to modify is the configuration file ora2pg.conf by setting
+-the DSN to the Oracle database and optionaly the name of a schema. Once that's
++the DSN to the Oracle database and optionally the name of a schema. Once that's
+ done you just have to set the type of export you want: TABLE with constraints,
+ VIEW, MVIEW, TABLESPACE, SEQUENCE, INDEXES, TRIGGER, GRANT, FUNCTION, PROCEDURE,
+ PACKAGE, PARTITION, TYPE, INSERT or COPY, FDW, QUERY, KETTLE, SYNONYM.
+@@ -49,7 +49,7 @@ Features included:
+ 	- Works on any plateform.
+ 	- Export Oracle tables as foreign data wrapper tables.
+ 	- Export materialized view.
+-	- Show a detailled report of an Oracle database content.
++	- Show a detailed report of an Oracle database content.
+ 	- Migration cost assessment of an Oracle database.
+ 	- Migration difficulty level assessment of an Oracle database.
+ 	- Migration cost assessment of PL/SQL code from a file.
+@@ -66,7 +66,7 @@ Features included:
+ Ora2Pg do its best to automatically convert your Oracle database to PostgreSQL
+ but there's still manual works to do. The Oracle specific PL/SQL code generated
+ for functions, procedures, packages and triggers has to be reviewed to match
+-the PostgreSQL syntax. You will find some useful recommandations on porting
++the PostgreSQL syntax. You will find some useful recommendations on porting
+ Oracle PL/SQL code to PostgreSQL PL/PGSQL at "Converting from other Databases
+ to PostgreSQL", section: Oracle (http://wiki.postgresql.org/wiki/Main_Page).
+ 
+@@ -109,7 +109,7 @@ host running Ora2Pg you can always trans
+ client installed. If you prefer to load export 'on the fly', the perl module
+ DBD::Pg is required.
+ 
+-Ora2Pg allow to dump all output int a compressed gzip file, to do that you need
++Ora2Pg allows one to dump all output int a compressed gzip file, to do that you need
+ the Compress::Zlib Perl module or if you prefer using bzip2 compression, the
+ program bzip2 must be available in your PATH.
+ 
+@@ -193,7 +193,7 @@ There's no specific order to place the c
+ set at the time they are read in the configuration file.
+ 
+ For configuration directives that just take a single value, you can use them
+-multiple time in the configuration file but only the last occurence found
++multiple time in the configuration file but only the last occurrence found
+ in the file will be used. For configuration directives that allow a list
+ of value, you can use it multiple time, the values will be appended to the
+ list. If you use the IMPORT directive to load a custom configuration file,
+@@ -247,7 +247,7 @@ Usage: ora2pg [-dhpqv --estimate_cost --
+     -p | --plsql      : Enable PLSQL to PLPSQL code conversion.
+     -P | --parallel num: Number of parallel tables to extract at the same time.
+     -q | --quiet      : disable progress bar.
+-    -s | --source DSN : Allow to set the Oracle DBI datasource.
++    -s | --source DSN : Allows one to set the Oracle DBI datasource.
+     -t | --type export: Used to set the export type. It will override the one
+ 			given in the configuration file (TYPE).
+     -T | --temp_dir DIR: use it to set a distinct temporary directory when two
+@@ -298,7 +298,7 @@ process have been interrupted and you've
+     "WARNING: an error occurs during data export. Please check what's happen."
+ Most of the time this is an OOM issue, you might first reduce DATA_LIMIT value.
+ 
+-For developpers, it is possible to add your own custom option(s) in the Perl
++For developers, it is possible to add your own custom option(s) in the Perl
+ script ora2pg as any configuration directive from ora2pg.conf can be passed
+ in lower case to the new Ora2Pg object instance. See ora2pg code on how to
+ add your own option.
+@@ -434,7 +434,7 @@ This directive did not control the Oracl
+ purely disable the use of any Oracle database by accepting a file as argument.
+ Set this directive to a file containing PL/SQL Oracle Code like function,
+ procedure or full package body to prevent Ora2Pg from connecting to an
+-Oracle database and just apply his convertion tool to the content of the
++Oracle database and just apply his conversion tool to the content of the
+ file. This can be used with the most of export types: TABLE, TRIGGER, PROCEDURE,
+ VIEW, FUNCTION or PACKAGE, etc.
+ 
+@@ -486,7 +486,7 @@ an ORA-XXX error, that mean that you DSN
+ the error and your settings and try again. The perl script says nothing and the
+ output file is empty: the user has not enough right to extract something from
+ the database. Try to connect Oracle as super user or take a look at directive
+-USER_GRANTS above and at next section, especiallly the SCHEMA directive.
++USER_GRANTS above and at next section, especially the SCHEMA directive.
+ 
+ =over 4
+ 
+@@ -566,7 +566,7 @@ search path will be set like this:
+ 
+ 	SET search_path = user_schema, public;
+ 
+-This will force to use an other schema than the one from Oracle schema and set
++This will force to use another schema than the one from Oracle schema and set
+ in the SCHEMA directive.
+ 
+ You can also set the default search_path for the connected PostgreSQL user by
+@@ -670,7 +670,7 @@ the new PostgreSQL database design.
+ Note that you can chained multiple export by giving to the TYPE directive a
+ comma-separated list of export type.
+ 
+-Ora2Pg will convert Oracle partition using table inheritence, trigger and
++Ora2Pg will convert Oracle partition using table inheritance, trigger and
+ functions. See document at Pg site:
+ http://www.postgresql.org/docs/current/interactive/ddl-partitioning.html
+ 
+@@ -707,7 +707,7 @@ Here is an example of the SHOW_COLUMN ou
+ Those extraction keyword are use to only display the requested information and
+ exit. This allow you to quickly know on what you are going to work.
+ 
+-The SHOW_COLUMN allow an other ora2pg command line option: '--allow relname'
++The SHOW_COLUMN allow another ora2pg command line option: '--allow relname'
+ or '-a relname' to limit the displayed information to the given table.
+ 
+ The SHOW_ENCODING export type will display the NLS_LANG and CLIENT_ENCODING
+@@ -729,10 +729,10 @@ Now you can use the table like a regular
+ See http://pgxn.org/dist/oracle_fdw/ for more information on this foreign data
+ wrapper.
+ 
+-Release 10 adds a new export type destinated to evaluate the content of the
++Release 10 adds a new export type destined to evaluate the content of the
+ database to migrate, in terms of objects and cost to end the migration:
+ 
+-	SHOW_REPORT  : show a detailled report of the Oracle database content.
++	SHOW_REPORT  : show a detailed report of the Oracle database content.
+ 
+ Here is a sample of report: http://ora2pg.darold.net/report.html
+ 
+@@ -743,8 +743,8 @@ about Migration Cost Evaluation.
+ 
+ Activate the migration cost evaluation. Must only be used with SHOW_REPORT,
+ FUNCTION, PROCEDURE, PACKAGE and QUERY export type. Default is disabled.
+-You may wat to use the --estimate_cost command line option instead to activate
+-this functionnality. Note that enabling this directive will force PLSQL_PGSQL
++You may want to use the --estimate_cost command line option instead to activate
++this functionality. Note that enabling this directive will force PLSQL_PGSQL
+ activation.
+ 
+ =item COST_UNIT_VALUE
+@@ -833,7 +833,7 @@ is arbitrary set to orcl. This only conc
+ 
+ =item EXTERNAL_TO_FDW
+ 
+-This directive, enabled by default, allow to export Oracle's External Tables as
++This directive, enabled by default, allows one to export Oracle's External Tables as
+ file_fdw foreign tables. To not export these tables at all, set the directive
+ to 0.
+ 
+@@ -873,7 +873,7 @@ valid regex into the list. For example:
+ 
+ 	ALLOW		EMPLOYEES SALE_.* COUNTRIES .*_GEOM_SEQ
+ 
+-will export objects with name EMPLOYEES, COUNTRIES, all objects begining with
++will export objects with name EMPLOYEES, COUNTRIES, all objects beginning with
+ 'SALE_' and all objects with a name ending by '_GEOM_SEQ'. The object depends
+ of the export type. Note that regex will not works with 8i database, you must
+ use the % placeholder instead, Ora2Pg will use the LIKE operator.
+@@ -895,13 +895,13 @@ Or a more complex form:
+ 		-e 'INDEX[emp_.*];CKEY[emp_salary_min]'
+ 
+ This command will export the definition of the employee table but will exclude
+-all index begining with 'emp_' and the CHECK contraint called 'emp_salary_min'.
++all index beginning with 'emp_' and the CHECK constraint called 'emp_salary_min'.
+ 
+ When exporting partition you can exclude some partition tables by using
+ 
+ 	ora2pg -p -c ora2pg.conf -t PARTITION -e 'PARTITION[PART_199.* PART_198.*]'
+ 
+-This will exclude partitionned tables for year 1980 to 1999 from the export but
++This will exclude partitioned tables for year 1980 to 1999 from the export but
+ not the main partition table. The trigger will also be adapted to exclude those
+ table.
+ 
+@@ -929,7 +929,7 @@ include valid regex into the list. For e
+ 
+ 	EXCLUDE		EMPLOYEES TMP_.* COUNTRIES
+ 
+-will exclude object with name EMPLOYEES, COUNTRIES and all tables begining with
++will exclude object with name EMPLOYEES, COUNTRIES and all tables beginning with
+ 'tmp_'.
+ 
+ For example, you can ban from export some unwanted function with this directive:
+@@ -937,7 +937,7 @@ For example, you can ban from export som
+ 	EXCLUDE		write_to_.* send_mail_.*
+ 
+ this example will exclude all functions, procedures or functions in a package
+-with the name begining with those regex. Note that regex will not works with
++with the name beginning with those regex. Note that regex will not works with
+ 8i database, you must use the % placeholder instead, Ora2Pg will use the NOT
+ LIKE operator.
+ 
+@@ -1006,7 +1006,7 @@ import on error.
+ 
+ Sometime you may want to extract data from an Oracle table but you need a
+ a custom query for that. Not just a "SELECT * FROM table" like Ora2Pg do
+-but a more complexe query. This directive allow you to overwrite the query
++but a more complex query. This directive allow you to overwrite the query
+ used by Ora2Pg to extract data. The format is TABLENAME[SQL_QUERY].
+ If you have multiple table to extract by replacing the Ora2Pg query, you can
+ define multiple REPLACE_QUERY lines.
+@@ -1059,7 +1059,7 @@ or INSERT export.
+ Some time you need to force the destination type, for example a column
+ exported as timestamp by Ora2Pg can be forced into type date. Value is
+ a comma-separated list of TABLE:COLUMN:TYPE structure. If you need to use
+-comma or space inside type definition you will have to backslach them.
++comma or space inside type definition you will have to backslash them.
+ 
+ 	MODIFY_TYPE	TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
+ 
+@@ -1078,7 +1078,7 @@ DEST_TBNAME1 and DEST_TBNAME2
+ =item REPLACE_COLS
+ 
+ Like table name, the name of the column can be remapped to a different name
+-using the following syntaxe:
++using the following syntax:
+ 
+ 	REPLACE_COLS	ORIG_TBNAME(ORIG_COLNAME1:NEW_COLNAME1,ORIG_COLNAME2:NEW_COLNAME2)
+ 
+@@ -1154,7 +1154,7 @@ matching expressions (LIKE or POSIX regu
+ does not use the standard "C" locale. If you enable, with value 1, this will
+ force Ora2Pg to export all indexes defined on varchar2() and char() columns
+ using those operators. If you set it to a value greater than 1 it will only
+-change indexes on columns where the charactere limit is greater or equal than
++change indexes on columns where the character limit is greater or equal than
+ this value. For example, set it to 128 to create these kind of indexes on
+ columns of type varchar2(N) where N >= 128.
+ 
+@@ -1163,15 +1163,15 @@ columns of type varchar2(N) where N >= 1
+ Enable this directive if you want that your partition table name will be
+ exported using the parent table name. Disabled by default. If you have
+ multiple partitioned table, when exported to PostgreSQL some partitions
+-could have the same name but dfferent parent tables. This is not allowed,
++could have the same name but different parent tables. This is not allowed,
+ table name must be unique.
+ 
+ =item DISABLE_PARTITION
+ 
+ If you don't want to reproduce the partitioning like in Oracle and want to
+-export all partitionned Oracle data into the main single table in PostgreSQL
++export all partitioned Oracle data into the main single table in PostgreSQL
+ enable this directive. Ora2Pg will export all data into the main table name.
+-Default is to use partitionning, Ora2Pg will export data from each partition
++Default is to use partitioning, Ora2Pg will export data from each partition
+ and import them into the PostgreSQL dedicated partition table.
+ 
+ =back
+@@ -1195,7 +1195,7 @@ creation using for example:
+ If those Oracle constraints parameters are not set, the default is to export
+ those columns as generic type GEOMETRY to be able to receive any spatial type.
+ 
+-The AUTODETECT_SPATIAL_TYPE directive allow to force Ora2Pg to autodetect the
++The AUTODETECT_SPATIAL_TYPE directive allows one to force Ora2Pg to autodetect the
+ real spatial type and dimension used in a spatial column otherwise a non-
+ constrained "geometry" type is used. Enabling this feature will force Ora2Pg to
+ scan a sample of 50000 column to look at the GTYPE used. You can increase or
+@@ -1216,7 +1216,7 @@ with a two or three dimensional polygon.
+ 
+ =item CONVERT_SRID
+ 
+-This directive allow you to control the automatically convertion of Oracle
++This directive allow you to control the automatically conversion of Oracle
+ SRID to standard EPSG. If enabled, Ora2Pg will use the Oracle function
+ sdo_cs.map_oracle_srid_to_epsg() to convert all SRID. Enabled by default.
+ 
+@@ -1243,7 +1243,7 @@ This directive can take three values: WK
+ When it is set to WKT, Ora2Pg will use SDO_UTIL.TO_WKTGEOMETRY() to
+ extract the geometry data. When it is set to WKB, Ora2Pg will use the
+ binary output using SDO_UTIL.TO_WKBGEOMETRY(). If those two extract type
+-are calles at Oracle side, they are slow and you can easily reach Out Of
++are calls at Oracle side, they are slow and you can easily reach Out Of
+ Memory when you have lot of rows. Also WKB is not able to export 3D geometry
+ and some geometries like CURVEPOLYGON. In this case you may use the INTERNAL
+ extraction type. It will use a Pure Perl library to convert the SDO_GEOMETRY
+@@ -1284,7 +1284,7 @@ default: 10000
+ When Ora2Pg detect a table with some BLOB it will automatically reduce the
+ value of this directive by dividing it by 10 until his value is below 1000.
+ You can control this value by setting BLOB_LIMIT. Exporting BLOB use lot of
+-ressources, setting it to a too high value can produce OOM.
++resources, setting it to a too high value can produce OOM.
+ 
+ =item OUTPUT
+ 
+@@ -1296,7 +1296,7 @@ is installed for the .bz2 extension.
+ 
+ =item OUTPUT_DIR
+ 
+-Since release 7.0, you can define a base directory where wfile will be written.
++Since release 7.0, you can define a base directory where the file will be written.
+ The directory must exists.
+ 
+ =item BZIP2
+@@ -1359,7 +1359,7 @@ If this directive is set to 1, a TRUNCAT
+ loading data. This is usable only during INSERT or COPY export type.
+ 
+ When activated, the instruction will be added only if there's no global DELETE
+-clause or not one specific to the current table (see bellow).
++clause or not one specific to the current table (see below).
+ 
+ =item DELETE
+ 
+@@ -1384,7 +1384,7 @@ These DELETE clauses might be useful wit
+ 
+ Set this parameter to 0 to not include the call to \set ON_ERROR_STOP ON in
+ all SQL scripts generated by Ora2Pg. By default this order is always present
+-so that the script will immediatly abort when an error is encountered.
++so that the script will immediately abort when an error is encountered.
+ 
+ =item COPY_FREEZE
+ 
+@@ -1432,7 +1432,7 @@ set this directive to 1, ora2pg will not
+ 
+ =head2 Taking export under control
+ 
+-The following other configuration directives interact directly with the export process and give you fine granuality in database export control.
++The following other configuration directives interact directly with the export process and give you fine granularity in database export control.
+ 
+ =over 4
+ 
+@@ -1504,7 +1504,7 @@ to be created as deferrable and initiall
+ 
+ =item DROP_FKEY
+ 
+-If deferring foreign keys is not possible du to the amount of data in a
++If deferring foreign keys is not possible due to the amount of data in a
+ single transaction, you've not exported foreign keys as deferrable or you
+ are using direct import to PostgreSQL, you can use the DROP_FKEY directive.
+ 
+@@ -1567,7 +1567,7 @@ precision because using numeric(p,s) is
+ If set to 1 replace portable numeric type into PostgreSQL internal type.
+ Oracle data type NUMBER(p) or NUMBER are converted to smallint, integer
+ or bigint PostgreSQL data type following the length of the precision. If
+-NUMBER without precision are set to DEFAULT_NUMERIC (see bellow).
++NUMBER without precision are set to DEFAULT_NUMERIC (see below).
+ 
+ =item DEFAULT_NUMERIC
+ 
+@@ -1593,7 +1593,7 @@ destination type to BYTEA, the default,
+ BFILE as bytea. The third case is when you set the destination type to EFILE,
+ in this case, Ora2Pg will export it as an EFILE record: (DIRECTORY, FILENAME).
+ Use the DIRECTORY export type to export the existing directories as well as
+-priviledge on those directories.
++privileges on those directories.
+ 
+ 
+ There's no SQL function available to retrieve the path to the BFILE, then Ora2Pg
+@@ -1648,23 +1648,23 @@ This is a port of the BFILE Oracle type
+ 
+ If you want to convert CHAR(n) from Oracle into varchar(n) or text on PostgreSQL
+ using directive DATA_TYPE, you might want to do some triming on the data. By
+-default Ora2Pg will auto-detect this conversion and remove any withspace at both
++default Ora2Pg will auto-detect this conversion and remove any whitespace at both
+ leading and trailing position. If you just want to remove the leadings character
+ set the value to LEADING. If you just want to remove the trailing character, set
+ the value to TRAILING. Default value is BOTH.
+ 
+ =item TRIM_CHAR
+ 
+-The default triming character is space, use this directive if you need to
++The default trimming character is space, use this directive if you need to
+ change the character that will be removed. For example, set it to - if you
+-have leading - in the char(n) field. To use space as triming charger, comment
++have leading - in the char(n) field. To use space as trimming charger, comment
+ this directive, this is the default value.
+ 
+ =item PRESERVE_CASE
+ 
+ If you want to preserve the case of Oracle object name set this directive to 1.
+ By default Ora2Pg will convert all Oracle object names to lower case.  I do not
+-recommand to enable this unless you will always have to double-quote object
++recommend to enable this unless you will always have to double-quote object
+ names on all your SQL scripts.
+ 
+ =item ORA_RESERVED_WORDS
+@@ -1693,7 +1693,7 @@ to use the old style with table and a se
+ 
+ PostgreSQL version below 9.x do not support IF EXISTS in DDL statements.
+ Disabling the directive with value 0 will prevent Ora2Pg to add those
+-keywords in all generated statments. Default value is 1, enabled.
++keywords in all generated statements. Default value is 1, enabled.
+ 
+ =item PG_SUPPORTS_ROLE (Deprecated)
+ 
+@@ -1739,7 +1739,7 @@ Enabled by default.
+ 
+ =item BITMAP_AS_GIN
+ 
+-Use btree_gin extenstion to create bitmap like index with pg >= 9.4
++Use btree_gin extension to create bitmap like index with pg >= 9.4
+ You will need to create the extension by yourself:
+       create extension btree_gin;
+ Default is to create GIN index, when disabled, a btree index will be created
+@@ -1763,7 +1763,7 @@ Take a look at this page to learn more:
+ Important note: If you increase the value of this directive take care that
+ DATA_LIMIT will probably needs to be reduced. Even if you only have a 1MB blob,
+ trying to read 10000 of them (the default DATA_LIMIT) all at once will require
+-10GB of memory. You may extract data from those table separatly and set a
++10GB of memory. You may extract data from those table separately and set a
+ DATA_LIMIT to 500 or lower, otherwise you may experience some out of memory.
+ 
+ =item LONGTRUNKOK
+@@ -1776,7 +1776,7 @@ enough.
+ =item NO_LOB_LOCATOR
+ 
+ Disable this if you don't want to load full content of BLOB and CLOB and use
+-LOB locators instead. This is usefull to not having to set LONGREADLEN. Note
++LOB locators instead. This is useful to not having to set LONGREADLEN. Note
+ that this will not improve speed of BLOB export as most of the time is always
+ consumed by the bytea escaping and in this case data will be processed line
+ by line and not by chunk of DATA_LIMIT rows. For more information on how it
+@@ -1833,7 +1833,7 @@ with format; DDHH24MMSS::bigint, this de
+ =item NLS_LANG and NLS_NCHAR
+ 
+ By default Ora2Pg will set NLS_LANG to AMERICAN_AMERICA.AL32UTF8 and NLS_NCHAR
+-to AL32UTF8. It is not recommanded to change those settings but in some case it
++to AL32UTF8. It is not recommended to change those settings but in some case it
+ could be useful. Using your own settings with those configuration directive will
+ change the client encoding at Oracle side by setting the environment variables
+ $ENV{NLS_LANG} and $ENV{NLS_NCHAR}.
+@@ -1867,7 +1867,7 @@ You can take a look at the PostgreSQL su
+ 
+ =head2 PLSQL to PLPSQL convertion
+ 
+-Automatic code convertion from Oracle PLSQL to PostgreSQL PLPGSQL is a work in
++Automatic code conversion from Oracle PLSQL to PostgreSQL PLPGSQL is a work in
+ progress in Ora2Pg and surely you will always have manual work. The Perl code
+ used for automatic conversion is all stored in a specific Perl Module named
+ Ora2Pg/PLSQL.pm feel free to modify/add you own code and send me patches. The
+@@ -1878,7 +1878,7 @@ parameters rewrite.
+ 
+ =item PLSQL_PGSQL
+ 
+-Enable/disable PLSQL to PLPSQL convertion. Enabled by default.
++Enable/disable PLSQL to PLPSQL conversion. Enabled by default.
+ 
+ =item NULL_EQUAL_EMPTY
+ 
+@@ -1944,11 +1944,11 @@ then it adds the SQL code to create the
+ 	CREATE VIEW mviewname_mview AS
+ 	SELECT ... FROM ...;
+ 
+-	SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the colum to used for the index);
++	SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the column to used for the index);
+ 
+-The first argument is the name of the materializd view, the second the name of the view on which the materialized view is based
++The first argument is the name of the materialized view, the second the name of the view on which the materialized view is based
+ and the third is the column name on which the index should be build (aka most od the time the primary key). This column is not
+-automatically deduced so you need to repace its name.
++automatically deduced so you need to replace its name.
+ 
+ As said above Ora2Pg only supports snapshot materialized views so the table will be entirely refreshed by issuing first a truncate
+ of the table and then by load again all data from the view:
+@@ -2205,14 +2205,14 @@ Migration level: B-5
+         5 = difficult: stored functions and/or triggers with code rewriting
+ 
+ This assessment consist in a letter A or B to specify if the migration needs
+-manual rewritting or not. And a number from 1 up to 5 to give you a technical
++manual rewriting or not. And a number from 1 up to 5 to give you a technical
+ difficulty level. You have an additional option --human_days_limit to specify
+ the number of human-days limit where the migration level should be set to C
+-to indicate that it need a huge amount of work and a full project managment
++to indicate that it need a huge amount of work and a full project management
+ with migration support. Default is 10 human-days. You can use the configuration
+ directive HUMAN_DAYS_LIMIT to change this default value permanently.
+ 
+-This feature has been developped to help you or your boss to decide which
++This feature has been developed to help you or your boss to decide which
+ database to migrate first and the team that must be mobilized to operate
+ the migration.
+ 
+@@ -2224,7 +2224,7 @@ number of instances and schema to scan f
+ Usage: ora2pg_scanner -l CSVFILE [-o OUTDIR]
+ 
+    -l | --list FILE : CSV file containing a list of database to scan with
+-		all requiered information. The first line of the file
++		all required information. The first line of the file
+ 		can contains the following header that describe the
+ 		format that must be used:
+ 
+@@ -2235,7 +2235,7 @@ Usage: ora2pg_scanner -l CSVFILE [-o OUT
+ 		If you want to change the name of this directory, set the name
+ 		at second argument.
+ 
+-   -t | --test : just try all connection by retrieving the requiered schema
++   -t | --test : just try all connection by retrieving the required schema
+ 		 or database name. Useful to validate your CSV list file.
+ 
+    Here is a full example of a CSV database list file:
+@@ -2247,7 +2247,7 @@ Usage: ora2pg_scanner -l CSVFILE [-o OUT
+    The CSV field separator must be a comma.
+ 
+ It will generate a CSV file with the assessment result, one line per schema or
+-database and a detailled HTML report for each database scanned.
++database and a detailed HTML report for each database scanned.
+ 
+ Hint: Use the -t | --test option before to test all your connections in your
+ CSV file.
+@@ -2264,7 +2264,7 @@ The number of migration units associated
+ in the same Perl library lib/Ora2Pg/PLSQL.pm in the hash %UNCOVERED_SCORE initialization.
+ 
+ This assessment method is a work in progress so I'm expecting feedbacks on migration
+-experiences to polish the scores/units attribued in those variables.
++experiences to polish the scores/units attributed in those variables.
+ 
+ =head2 Improving indexes and constraints creation speed
+ 
+@@ -2295,7 +2295,7 @@ Oracle table:
+ 
+ need to be "translated" into a table using BLOB as follow:
+ 
+-	CREATE TABLE test_blob (id NUMNER, c1 BLOB);
++	CREATE TABLE test_blob (id NUMBER, c1 BLOB);
+ 
+ And then copy the data with the following INSERT query:
+ 
+@@ -2445,5 +2445,5 @@ Copyright (c) 2000-2016 Gilles Darold -
+ 
+ =head1 ACKNOWLEDGEMENT
+ 
+-I must thanks a lot all the great contributors, see changelog for all acknowledgements.
++I must thanks a lot all the great contributors, see changelog for all acknowledgments.
+ 
diff -Nru ora2pg-18.0/doc/Ora2Pg.pod ora2pg-17.6/doc/Ora2Pg.pod
--- ora2pg-18.0/doc/Ora2Pg.pod	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/doc/Ora2Pg.pod	2016-11-18 05:45:49.000000000 +0800
@@ -21,7 +21,7 @@
 
 Ora2Pg consist of a Perl script (ora2pg) and a Perl module (Ora2Pg.pm), the
 only thing you have to modify is the configuration file ora2pg.conf by setting
-the DSN to the Oracle database and optionally the name of a schema. Once that's
+the DSN to the Oracle database and optionaly the name of a schema. Once that's
 done you just have to set the type of export you want: TABLE with constraints,
 VIEW, MVIEW, TABLESPACE, SEQUENCE, INDEXES, TRIGGER, GRANT, FUNCTION, PROCEDURE,
 PACKAGE, PARTITION, TYPE, INSERT or COPY, FDW, QUERY, KETTLE, SYNONYM.
@@ -66,7 +66,7 @@
 Ora2Pg do its best to automatically convert your Oracle database to PostgreSQL
 but there's still manual works to do. The Oracle specific PL/SQL code generated
 for functions, procedures, packages and triggers has to be reviewed to match
-the PostgreSQL syntax. You will find some useful recommendations on porting
+the PostgreSQL syntax. You will find some useful recommandations on porting
 Oracle PL/SQL code to PostgreSQL PL/PGSQL at "Converting from other Databases
 to PostgreSQL", section: Oracle (http://wiki.postgresql.org/wiki/Main_Page).
 
@@ -90,15 +90,11 @@
 
 =head2 Requirement
 
-You need a modern Perl distribution (perl 5.10 and more). To connect to a
-database and proceed to his migration you need the DBI Perl module > 1.614.
-To migrate an Oracle database you need the DBD::Oracle Perl modules to be
-installed. To migrate a MySQL database you need the DBD::MySQL Perl modules.
-These modules are used to connect to the database but they are not mandatory
-if you want to migrate DDL input files.
-
-To install DBD::Oracle and have it working you need to have the Oracle client
-libraries installed and the ORACLE_HOME environment variable must be defined.
+You need a modern Perl distribution (perl 5.10 and more), the DBI > 1.614 and
+DBD::Oracle Perl modules to be installed. These are used to connect to the
+Oracle database. To install DBD::Oracle and have it working you need to have
+the Oracle client libraries installed and the ORACLE_HOME environment variable
+must be defined.
 
 If you plan to export a MySQL database you need to install the Perl module
 DBD::mysql which require that the mysql client libraries are installed.
@@ -113,7 +109,7 @@
 client installed. If you prefer to load export 'on the fly', the perl module
 DBD::Pg is required.
 
-Ora2Pg allow to dump all output in a compressed gzip file, to do that you need
+Ora2Pg allow to dump all output int a compressed gzip file, to do that you need
 the Compress::Zlib Perl module or if you prefer using bzip2 compression, the
 program bzip2 must be available in your PATH.
 
@@ -121,8 +117,8 @@
 
 Like any other Perl Module Ora2Pg can be installed with the following commands:
 
-	tar xzf ora2pg-x.x.tar.gz
-	cd ora2pg-x.x/
+	tar xzf ora2pg-10.x.tar.gz
+	cd ora2pg-10.x/
 	perl Makefile.PL
 	make && make install
 
@@ -197,7 +193,7 @@
 set at the time they are read in the configuration file.
 
 For configuration directives that just take a single value, you can use them
-multiple time in the configuration file but only the last occurrence found
+multiple time in the configuration file but only the last occurence found
 in the file will be used. For configuration directives that allow a list
 of value, you can use it multiple time, the values will be appended to the
 list. If you use the IMPORT directive to load a custom configuration file,
@@ -234,7 +230,6 @@
     -c | --conf file  : Used to set an alternate configuration file than the
 			default /etc/ora2pg/ora2pg.conf.
     -d | --debug      : Enable verbose output.
-    -D | --data_type STR : Allow custom type replacement at command line.
     -e | --exclude str: coma separated list of objects to exclude from export.
 			Can be used with SHOW_COLUMN too.
     -h | --help       : Print this short help.
@@ -303,7 +298,7 @@
     "WARNING: an error occurs during data export. Please check what's happen."
 Most of the time this is an OOM issue, you might first reduce DATA_LIMIT value.
 
-For developers, it is possible to add your own custom option(s) in the Perl
+For developpers, it is possible to add your own custom option(s) in the Perl
 script ora2pg as any configuration directive from ora2pg.conf can be passed
 in lower case to the new Ora2Pg object instance. See ora2pg code on how to
 add your own option.
@@ -439,7 +434,7 @@
 purely disable the use of any Oracle database by accepting a file as argument.
 Set this directive to a file containing PL/SQL Oracle Code like function,
 procedure or full package body to prevent Ora2Pg from connecting to an
-Oracle database and just apply his conversion tool to the content of the
+Oracle database and just apply his convertion tool to the content of the
 file. This can be used with the most of export types: TABLE, TRIGGER, PROCEDURE,
 VIEW, FUNCTION or PACKAGE, etc.
 
@@ -491,7 +486,7 @@
 the error and your settings and try again. The perl script says nothing and the
 output file is empty: the user has not enough right to extract something from
 the database. Try to connect Oracle as super user or take a look at directive
-USER_GRANTS above and at next section, especially the SCHEMA directive.
+USER_GRANTS above and at next section, especiallly the SCHEMA directive.
 
 =over 4
 
@@ -675,7 +670,7 @@
 Note that you can chained multiple export by giving to the TYPE directive a
 comma-separated list of export type.
 
-Ora2Pg will convert Oracle partition using table inheritance, trigger and
+Ora2Pg will convert Oracle partition using table inheritence, trigger and
 functions. See document at Pg site:
 http://www.postgresql.org/docs/current/interactive/ddl-partitioning.html
 
@@ -734,7 +729,7 @@
 See http://pgxn.org/dist/oracle_fdw/ for more information on this foreign data
 wrapper.
 
-Release 10 adds a new export type destined to evaluate the content of the
+Release 10 adds a new export type destinated to evaluate the content of the
 database to migrate, in terms of objects and cost to end the migration:
 
 	SHOW_REPORT  : show a detailled report of the Oracle database content.
@@ -748,8 +743,8 @@
 
 Activate the migration cost evaluation. Must only be used with SHOW_REPORT,
 FUNCTION, PROCEDURE, PACKAGE and QUERY export type. Default is disabled.
-You may want to use the --estimate_cost command line option instead to activate
-this functionality. Note that enabling this directive will force PLSQL_PGSQL
+You may wat to use the --estimate_cost command line option instead to activate
+this functionnality. Note that enabling this directive will force PLSQL_PGSQL
 activation.
 
 =item COST_UNIT_VALUE
@@ -878,7 +873,7 @@
 
 	ALLOW		EMPLOYEES SALE_.* COUNTRIES .*_GEOM_SEQ
 
-will export objects with name EMPLOYEES, COUNTRIES, all objects beginning with
+will export objects with name EMPLOYEES, COUNTRIES, all objects begining with
 'SALE_' and all objects with a name ending by '_GEOM_SEQ'. The object depends
 of the export type. Note that regex will not works with 8i database, you must
 use the % placeholder instead, Ora2Pg will use the LIKE operator.
@@ -900,13 +895,13 @@
 		-e 'INDEX[emp_.*];CKEY[emp_salary_min]'
 
 This command will export the definition of the employee table but will exclude
-all index beginning with 'emp_' and the CHECK constraint called 'emp_salary_min'.
+all index begining with 'emp_' and the CHECK contraint called 'emp_salary_min'.
 
 When exporting partition you can exclude some partition tables by using
 
 	ora2pg -p -c ora2pg.conf -t PARTITION -e 'PARTITION[PART_199.* PART_198.*]'
 
-This will exclude partitioned tables for year 1980 to 1999 from the export but
+This will exclude partitionned tables for year 1980 to 1999 from the export but
 not the main partition table. The trigger will also be adapted to exclude those
 table.
 
@@ -934,7 +929,7 @@
 
 	EXCLUDE		EMPLOYEES TMP_.* COUNTRIES
 
-will exclude object with name EMPLOYEES, COUNTRIES and all tables beginning with
+will exclude object with name EMPLOYEES, COUNTRIES and all tables begining with
 'tmp_'.
 
 For example, you can ban from export some unwanted function with this directive:
@@ -942,7 +937,7 @@
 	EXCLUDE		write_to_.* send_mail_.*
 
 this example will exclude all functions, procedures or functions in a package
-with the name beginning with those regex. Note that regex will not works with
+with the name begining with those regex. Note that regex will not works with
 8i database, you must use the % placeholder instead, Ora2Pg will use the NOT
 LIKE operator.
 
@@ -1011,122 +1006,19 @@
 
 Sometime you may want to extract data from an Oracle table but you need a
 a custom query for that. Not just a "SELECT * FROM table" like Ora2Pg do
-but a more complex query. This directive allow you to overwrite the query
+but a more complexe query. This directive allow you to overwrite the query
 used by Ora2Pg to extract data. The format is TABLENAME[SQL_QUERY].
 If you have multiple table to extract by replacing the Ora2Pg query, you can
 define multiple REPLACE_QUERY lines.
 
 	REPLACE_QUERY	EMPLOYEES[SELECT e.id,e.fisrtname,lastname FROM EMPLOYEES e JOIN EMP_UPDT u ON (e.id=u.id AND u.cdate>'2014-08-01 00:00:00')]
 
-=back
-
-=head2 Controm of Full Text Search export
-
-Several directives can be used to control the way Ora2Pg will export the
-Oracle's Text search indexes. By default CONTEXT indexes will be exported
-to PostgreSQL FTS indexes but CTXCAT indexes wikk be exported as indexes
-using the pg_trgm extension.
-
-=over 4
-
 =item CONTEXT_AS_TRGM
 
 Force Ora2Pg to translate Oracle Text indexes into PostgreSQL indexes using
 pg_trgm extension. Default is to translate CONTEXT indexes into FTS indexes
 and CTXCAT indexes using pg_trgm. Most of the time using pg_trgm is enough,
-this is why this directive stand for. You need to create the pg_trgm extension
-into the destination database before importing the objects:
-
-	CREATE EXTENSION pg_trgm;
-
-=item FTS_INDEX_ONLY
-
-By default Ora2Pg creates a function-based index to translate Oracle Text
-indexes. 
-
-	CREATE INDEX ON t_document
-		USING gin(to_tsvector('pg_catalog.french', title));
-
-You will have to rewrite the CONTAIN() clause using to_tsvector(), example:
-
-	SELECT id,title FROM t_document
-		WHERE to_tsvector(title)) @@ to_tsquery('search_word');
-
-To force Ora2Pg to create an extra tsvector column with a dedicated triggers
-for FTS indexes, disable this directive. In this case, Ora2Pg will add the
-column as follow: ALTER TABLE t_document ADD COLUMN tsv_title tsvector;
-Then update the column to compute FTS vectors if data have been loaded before
-	    UPDATE t_document SET tsv_title =
-		to_tsvector('pg_catalog.french', coalesce(title,''));
-To automatically update the column when a modification in the title column
-appears, Ora2Pg adds the following trigger:
-
-	CREATE FUNCTION tsv_t_document_title() RETURNS trigger AS $$
-	BEGIN
-	       IF TG_OP = 'INSERT' OR new.title != old.title THEN
-		       new.tsv_title :=
-		       to_tsvector('pg_catalog.french', coalesce(new.title,''));
-	       END IF;
-	       return new;
-	END
-	$$ LANGUAGE plpgsql;
-	CREATE TRIGGER trig_tsv_t_document_title BEFORE INSERT OR UPDATE
-	 ON t_document
-	 FOR EACH ROW EXECUTE PROCEDURE tsv_t_document_title();
-
-When the Oracle text index is defined over multiple columns, Ora2Pg will use
-setweight() to set a weight in the order of the column declaration.
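 
 With the extra tsvector column approach, queries then target the column
 directly, for example (assuming the tsv_title column created above):
 
 	SELECT id, title FROM t_document
 		WHERE tsv_title @@ to_tsquery('pg_catalog.french', 'search_word');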
-
-=item FTS_CONFIG
-
-Use this directive to force the text search configuration to use. When it is
-not set, Ora2Pg will autodetect the stemmer used by Oracle for each index and
-fall back to pg_catalog.english if the information is not found.
-
-=item USE_UNACCENT
-
-If you want to perform your text search in an accent insensitive way, enable
-this directive. Ora2Pg will create a helper function over unaccent() and
-create the pg_trgm indexes using this function. With FTS Ora2Pg will
-redefine your text search configuration, for example:
-
-      CREATE TEXT SEARCH CONFIGURATION fr (COPY = french); 
-      ALTER TEXT SEARCH CONFIGURATION fr
-              ALTER MAPPING FOR hword, hword_part, word WITH unaccent, french_stem;
-
-then set the FTS_CONFIG ora2pg.conf directive to fr instead of pg_catalog.english.
-
-When enabled, Ora2pg will create the wrapper function:
-
-      CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-      RETURNS text AS
-      $$
-          SELECT public.unaccent('public.unaccent', $1);
-      $$ LANGUAGE sql IMMUTABLE
-	 COST 1;
-
-The indexes are exported as follows:
-
-      CREATE INDEX t_document_title_unaccent_trgm_idx ON t_document 
-          USING gin (unaccent_immutable(title) gin_trgm_ops);
-
-In your queries you will need to use the same function in the search to
-be able to use the function-based index. Example:
-
-	SELECT * FROM t_document
-		WHERE unaccent_immutable(title) LIKE '%donnees%';
-
-=item USE_LOWER_UNACCENT
-
-Same as above but call lower() in the unaccent_immutable() function:
-
-      CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-      RETURNS text AS
-      $$
-          SELECT lower(public.unaccent('public.unaccent', $1));
-      $$ LANGUAGE sql IMMUTABLE;
-
+which is why this directive exists.
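 
 As an illustration only (object names are hypothetical, this is not the exact
 DDL generated by Ora2Pg), a CTXCAT index translated through pg_trgm typically
 becomes a trigram GIN index that speeds up LIKE/ILIKE searches:
 
 	CREATE EXTENSION IF NOT EXISTS pg_trgm;
 	CREATE INDEX t_document_title_trgm_idx ON t_document
 		USING gin (title gin_trgm_ops);
 	SELECT id, title FROM t_document WHERE title ILIKE '%search_word%';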
 
 =back
 
@@ -1162,6 +1054,18 @@
 'id' and 'fichier' from the T_TEST2 table. This directive is only used with COPY
 or INSERT export.
 
+=item MODIFY_TYPE
+
+Sometimes you need to force the destination type, for example a column
+exported as timestamp by Ora2Pg can be forced into type date. Value is
+a comma-separated list of TABLE:COLUMN:TYPE structure. If you need to use
+comma or space inside type definition you will have to backslash them.
+
+	MODIFY_TYPE	TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
+
+Type of table1.col3 will be replaced by a varchar and table1.col4 by
+a decimal with precision.
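 
 With the example above, the generated table definition would then carry the
 forced types, roughly (sketch, other columns unchanged):
 
 	-- col3 varchar
 	-- col4 decimal(9,6)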
+
 =item REPLACE_TABLES
 
 This directive allow you to remap a list of Oracle table name to a PostgreSQL table name during export. The value is a list of space-separated values with the following structure:
@@ -1174,7 +1078,7 @@
 =item REPLACE_COLS
 
 Like table name, the name of the column can be remapped to a different name
-using the following syntax:
+using the following syntaxe:
 
 	REPLACE_COLS	ORIG_TBNAME(ORIG_COLNAME1:NEW_COLNAME1,ORIG_COLNAME2:NEW_COLNAME2)
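 
 For example, with hypothetical names, to rename two columns of table T_CLIENT
 during the export:
 
 	REPLACE_COLS	T_CLIENT(CLI_NAME:client_name,CLI_CODE:client_code)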
 
@@ -1250,7 +1154,7 @@
 does not use the standard "C" locale. If you enable, with value 1, this will
 force Ora2Pg to export all indexes defined on varchar2() and char() columns
 using those operators. If you set it to a value greater than 1 it will only
-change indexes on columns where the character limit is greater or equal than
+change indexes on columns where the charactere limit is greater or equal than
 this value. For example, set it to 128 to create these kind of indexes on
 columns of type varchar2(N) where N >= 128.
 
@@ -1259,15 +1163,15 @@
 Enable this directive if you want that your partition table name will be
 exported using the parent table name. Disabled by default. If you have
 multiple partitioned table, when exported to PostgreSQL some partitions
-could have the same name but different parent tables. This is not allowed,
+could have the same name but dfferent parent tables. This is not allowed,
 table name must be unique.
 
 =item DISABLE_PARTITION
 
 If you don't want to reproduce the partitioning like in Oracle and want to
-export all partitioned Oracle data into the main single table in PostgreSQL
+export all partitionned Oracle data into the main single table in PostgreSQL
 enable this directive. Ora2Pg will export all data into the main table name.
-Default is to use partitioning, Ora2Pg will export data from each partition
+Default is to use partitionning, Ora2Pg will export data from each partition
 and import them into the PostgreSQL dedicated partition table.
 
 =back
@@ -1312,7 +1216,7 @@
 
 =item CONVERT_SRID
 
-This directive allow you to control the automatically conversion of Oracle
+This directive allow you to control the automatically convertion of Oracle
 SRID to standard EPSG. If enabled, Ora2Pg will use the Oracle function
 sdo_cs.map_oracle_srid_to_epsg() to convert all SRID. Enabled by default.
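 
 For reference, the mapping can be checked manually on the Oracle side with the
 same function, for example (8307 being Oracle's default SRID):
 
 	SELECT sdo_cs.map_oracle_srid_to_epsg(8307) FROM dual;
 	-- expected to return 4326, the EPSG value used on the PostGIS side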
 
@@ -1339,7 +1243,7 @@
 When it is set to WKT, Ora2Pg will use SDO_UTIL.TO_WKTGEOMETRY() to
 extract the geometry data. When it is set to WKB, Ora2Pg will use the
 binary output using SDO_UTIL.TO_WKBGEOMETRY(). If those two extract type
-are calls at Oracle side, they are slow and you can easily reach Out Of
+are calles at Oracle side, they are slow and you can easily reach Out Of
 Memory when you have lot of rows. Also WKB is not able to export 3D geometry
 and some geometries like CURVEPOLYGON. In this case you may use the INTERNAL
 extraction type. It will use a Pure Perl library to convert the SDO_GEOMETRY
@@ -1380,7 +1284,7 @@
 When Ora2Pg detects a table with some BLOB it will automatically reduce the
 value of this directive by dividing it by 10 until its value is below 1000.
 You can control this value by setting BLOB_LIMIT. Exporting BLOB use lot of
-resources, setting it to a too high value can produce OOM.
+ressources, setting it to a too high value can produce OOM.
 
 =item OUTPUT
 
@@ -1392,7 +1296,7 @@
 
 =item OUTPUT_DIR
 
-Since release 7.0, you can define a base directory where the file will be written.
+Since release 7.0, you can define a base directory where wfile will be written.
 The directory must exist.
 
 =item BZIP2
@@ -1480,7 +1384,7 @@
 
 Set this parameter to 0 to not include the call to \set ON_ERROR_STOP ON in
 all SQL scripts generated by Ora2Pg. By default this order is always present
-so that the script will immediately abort when an error is encountered.
+so that the script will immediatly abort when an error is encountered.
 
 =item COPY_FREEZE
 
@@ -1526,127 +1430,9 @@
 
 =back
 
-=head2 Column type control
-
-=over 4
-
-=item PG_NUMERIC_TYPE
-
-If set to 1 replace portable numeric type into PostgreSQL internal type.
-Oracle data type NUMBER(p,s) is approximatively converted to real and
-float PostgreSQL data type. If you have monetary fields or don't want
-rounding issues with the extra decimals you should preserve the same
-numeric(p,s) PostgreSQL data type. Do that only if you need very good
-precision because using numeric(p,s) is slower than using real or double.
-
-=item PG_INTEGER_TYPE
-
-If set to 1 replace portable numeric type into PostgreSQL internal type.
-Oracle data type NUMBER(p) is converted to the smallint, integer or bigint
-PostgreSQL data type depending on the precision. NUMBER columns without
-precision are converted following the DEFAULT_NUMERIC directive (see below).
-
-=item DEFAULT_NUMERIC
-
-NUMBER without precision are converted by default to bigint only if
-PG_INTEGER_TYPE is true. You can overwrite this value to any PG type,
-like integer or float.
-
-=item DATA_TYPE
-
-If you're experiencing any problem in data type schema conversion, this
-directive lets you take full control of the correspondence between Oracle and
-PostgreSQL types to redefine the data type translation used in Ora2pg. The syntax
-is a comma-separated list of "Oracle datatype:Postgresql datatype". Here is
-the default list used:
-
-	DATA_TYPE	DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
-
-Note that the directive and the list definition must be a single line.
-
-If you want to replace a type with a precision and scale you need to escape
-the coma with a backslash. For example, if you want to replace all NUMBER(*,0)
-into bigint instead of numeric(38) add the following:
-
-       DATA_TYPE       NUMBER(*\,0):bigint
-
-You don't have to recopy all default type conversion but just the one you want
-to rewrite.
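 
 For example, to keep Oracle DATE as a PostgreSQL date instead of the default
 timestamp, a single override is enough (sketch):
 
 	DATA_TYPE	DATE:date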
-
-There's a special case with BFILE: when they are converted to type TEXT, they
-will just contain the full path to the external file. If you set the
-destination type to BYTEA, the default, Ora2Pg will export the content of the
-BFILE as bytea. The third case is when you set the destination type to EFILE,
-in this case, Ora2Pg will export it as an EFILE record: (DIRECTORY, FILENAME).
-Use the DIRECTORY export type to export the existing directories as well as
-privileges on those directories.
-
-
-There's no SQL function available to retrieve the path to the BFILE, so Ora2Pg
-has to create one using the DBMS_LOB package.
-
-	CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
-	RETURN VARCHAR2
-	  AS
-	    l_dir   VARCHAR2(4000);
-	    l_fname VARCHAR2(4000);
-	    l_path  VARCHAR2(4000);
-	  BEGIN
-	    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-	    SELECT directory_path INTO l_path FROM all_directories
-		WHERE directory_name = l_dir;
-	    l_dir := rtrim(l_path,'/');
-	    RETURN l_dir || '/' || l_fname;
-	  END;
-
-This function is only created if Ora2Pg finds a table with a BFILE column and
-the destination type is TEXT. The function is dropped at the end of the
-export. This concerns both the COPY and INSERT export types.
-
-There's no SQL function available to retrieve BFILE as an EFILE record, so
-Ora2Pg has to create one using the DBMS_LOB package.
-
-	CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
-	RETURN VARCHAR2
-	  AS
-	    l_dir   VARCHAR2(4000);
-	    l_fname VARCHAR2(4000);
-	  BEGIN
-	    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-	    RETURN '(' || l_dir || ',' || l_fname || ')';
-	  END;
-
-This function is only created if Ora2Pg finds a table with a BFILE column and
-the destination type is EFILE. The function is dropped at the end of the
-export. This concerns both the COPY and INSERT export types.
-
-To set the destination type, use the DATA_TYPE configuration directive:
-
-	DATA_TYPE	BFILE:EFILE
-
-for example.
-
-The EFILE type is a user defined type created by the PostgreSQL extension
-external_file that can be found here: https://github.com/darold/external_file
-This is a port of the BFILE Oracle type to PostgreSQL.
-
-=item MODIFY_TYPE
-
-Sometimes you need to force the destination type, for example a column
-exported as timestamp by Ora2Pg can be forced into type date. Value is
-a comma-separated list of TABLE:COLUMN:TYPE structure. If you need to use
-comma or space inside type definition you will have to backslash them.
-
-	MODIFY_TYPE	TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\,6)
-
-Type of table1.col3 will be replaced by a varchar and table1.col4 by
-a decimal with precision.
-
-=back
-
 =head2 Taking export under control
 
-The following other configuration directives interact directly with the export process and give you fine granularity in database export control.
+The following other configuration directives interact directly with the export process and give you fine granuality in database export control.
 
 =over 4
 
@@ -1718,7 +1504,7 @@
 
 =item DROP_FKEY
 
-If deferring foreign keys is not possible due to the amount of data in a
+If deferring foreign keys is not possible du to the amount of data in a
 single transaction, you've not exported foreign keys as deferrable or you
 are using direct import to PostgreSQL, you can use the DROP_FKEY directive.
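 
 Conceptually this boils down to the following statements, which are generated
 from the Oracle catalog (table and constraint names below are only illustrative):
 
 	ALTER TABLE orders DROP CONSTRAINT orders_customer_id_fkey;
 	-- ... data import ...
 	ALTER TABLE orders ADD CONSTRAINT orders_customer_id_fkey
 		FOREIGN KEY (customer_id) REFERENCES customers(id);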
 
@@ -1767,27 +1553,118 @@
 This directive is only used during data export to build INSERT statements.
 See NOESCAPE for enabling/disabling escape in COPY statements.
 
+=item PG_NUMERIC_TYPE
+
+If set to 1 replace portable numeric type into PostgreSQL internal type.
+Oracle data type NUMBER(p,s) is approximatively converted to real and
+float PostgreSQL data type. If you have monetary fields or don't want
+rounding issues with the extra decimals you should preserve the same
+numeric(p,s) PostgreSQL data type. Do that only if you need very good
+precision because using numeric(p,s) is slower than using real or double.
+
+=item PG_INTEGER_TYPE
+
+If set to 1 replace portable numeric type into PostgreSQL internal type.
+Oracle data type NUMBER(p) is converted to the smallint, integer or bigint
+PostgreSQL data type depending on the precision. NUMBER columns without
+precision are converted following the DEFAULT_NUMERIC directive (see below).
+
+=item DEFAULT_NUMERIC
+
+NUMBER without precision are converted by default to bigint only if
+PG_INTEGER_TYPE is true. You can overwrite this value to any PG type,
+like integer or float.
+
+=item DATA_TYPE
+
+If you're experiencing any problem in data type schema conversion, this
+directive lets you take full control of the correspondence between Oracle and
+PostgreSQL types to redefine the data type translation used in Ora2pg. The syntax
+is a comma-separated list of "Oracle datatype:Postgresql datatype". Here is
+the default list used:
+
+	DATA_TYPE	DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
+
+Note that the directive and the list definition must be a single line.
+
+There's a special case with BFILE: when they are converted to type TEXT, they
+will just contain the full path to the external file. If you set the
+destination type to BYTEA, the default, Ora2Pg will export the content of the
+BFILE as bytea. The third case is when you set the destination type to EFILE,
+in this case, Ora2Pg will export it as an EFILE record: (DIRECTORY, FILENAME).
+Use the DIRECTORY export type to export the existing directories as well as
+privileges on those directories.
+
+
+There's no SQL function available to retrieve the path to the BFILE, so Ora2Pg
+has to create one using the DBMS_LOB package.
+
+	CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
+	RETURN VARCHAR2
+	  AS
+	    l_dir   VARCHAR2(4000);
+	    l_fname VARCHAR2(4000);
+	    l_path  VARCHAR2(4000);
+	  BEGIN
+	    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+	    SELECT directory_path INTO l_path FROM all_directories
+		WHERE directory_name = l_dir;
+	    l_dir := rtrim(l_path,'/');
+	    RETURN l_dir || '/' || l_fname;
+	  END;
+
+This function is only created if Ora2Pg finds a table with a BFILE column and
+the destination type is TEXT. The function is dropped at the end of the
+export. This concerns both the COPY and INSERT export types.
+
+There's no SQL function available to retrieve BFILE as an EFILE record, so
+Ora2Pg has to create one using the DBMS_LOB package.
+
+	CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
+	RETURN VARCHAR2
+	  AS
+	    l_dir   VARCHAR2(4000);
+	    l_fname VARCHAR2(4000);
+	  BEGIN
+	    dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+	    RETURN '(' || l_dir || ',' || l_fname || ')';
+	  END;
+
+This function is only created if Ora2Pg finds a table with a BFILE column and
+the destination type is EFILE. The function is dropped at the end of the
+export. This concerns both the COPY and INSERT export types.
+
+To set the destination type, use the DATA_TYPE configuration directive:
+
+	DATA_TYPE	BFILE:EFILE
+
+for example.
+
+The EFILE type is a user defined type created by the PostgreSQL extension
+external_file that can be found here: https://github.com/darold/external_file
+This is a port of the BFILE Oracle type to PostgreSQL.
+
 =item TRIM_TYPE
 
 If you want to convert CHAR(n) from Oracle into varchar(n) or text on PostgreSQL
 using directive DATA_TYPE, you might want to do some trimming on the data. By
-default Ora2Pg will auto-detect this conversion and remove any whitespace at both
+default Ora2Pg will auto-detect this conversion and remove any withspace at both
 leading and trailing position. If you just want to remove the leadings character
 set the value to LEADING. If you just want to remove the trailing character, set
 the value to TRAILING. Default value is BOTH.
 
 =item TRIM_CHAR
 
-The default trimming character is space, use this directive if you need to
+The default triming character is space, use this directive if you need to
 change the character that will be removed. For example, set it to - if you
-have leading - in the char(n) field. To use space as trimming character, comment
+have leading - in the char(n) field. To use space as triming charger, comment
 this directive, this is the default value.
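 
 For example, to strip only a leading dash from CHAR(n) data:
 
 	TRIM_TYPE	LEADING
 	TRIM_CHAR	-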
 
 =item PRESERVE_CASE
 
 If you want to preserve the case of Oracle object name set this directive to 1.
 By default Ora2Pg will convert all Oracle object names to lower case.  I do not
-recommend to enable this unless you will always have to double-quote object
+recommand to enable this unless you will always have to double-quote object
 names on all your SQL scripts.
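 
 The reason is that mixed-case names must then be double-quoted in every query,
 for example (hypothetical names):
 
 	SELECT "EmpId" FROM "Employees";  -- with PRESERVE_CASE enabled
 	SELECT empid FROM employees;      -- with the default lower case export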
 
 =item ORA_RESERVED_WORDS
@@ -1816,7 +1693,7 @@
 
 PostgreSQL version below 9.x do not support IF EXISTS in DDL statements.
 Disabling the directive with value 0 will prevent Ora2Pg to add those
-keywords in all generated statements. Default value is 1, enabled.
+keywords in all generated statments. Default value is 1, enabled.
 
 =item PG_SUPPORTS_ROLE (Deprecated)
 
@@ -1862,7 +1739,7 @@
 
 =item BITMAP_AS_GIN
 
-Use btree_gin extension to create bitmap like index with pg >= 9.4
+Use btree_gin extenstion to create bitmap like index with pg >= 9.4
 You will need to create the extension by yourself:
       create extension btree_gin;
 Default is to create GIN index, when disabled, a btree index will be created
@@ -1899,7 +1776,7 @@
 =item NO_LOB_LOCATOR
 
 Disable this if you don't want to load full content of BLOB and CLOB and use
-LOB locators instead. This is useful to not having to set LONGREADLEN. Note
+LOB locators instead. This is usefull to not having to set LONGREADLEN. Note
 that this will not improve speed of BLOB export as most of the time is always
 consumed by the bytea escaping and in this case data will be processed line
 by line and not by chunk of DATA_LIMIT rows. For more information on how it
@@ -1990,7 +1867,7 @@
 
 =head2 PLSQL to PLPGSQL conversion
 
-Automatic code conversion from Oracle PLSQL to PostgreSQL PLPGSQL is a work in
+Automatic code convertion from Oracle PLSQL to PostgreSQL PLPGSQL is a work in
 progress in Ora2Pg and surely you will always have manual work. The Perl code
 used for automatic conversion is all stored in a specific Perl Module named
 Ora2Pg/PLSQL.pm feel free to modify/add your own code and send me patches. The
@@ -2001,7 +1878,7 @@
 
 =item PLSQL_PGSQL
 
-Enable/disable PLSQL to PLPSQL conversion. Enabled by default.
+Enable/disable PLSQL to PLPSQL convertion. Enabled by default.
 
 =item NULL_EQUAL_EMPTY
 
@@ -2035,20 +1912,6 @@
 The replacement will be done in all kind of DDL or code that is parsed by
 the PLSQL to PLPGSQL converter. PLSQL_PGSQL must be enabled or -p used in
 command line.
-
-=item REWRITE_OUTER_JOIN
-
-Enable this directive if the rewrite of Oracle native syntax (+) of
-OUTER JOIN is broken. This will force Ora2Pg to not rewrite such code,
-default is to try to rewrite simple form of right outer join for the
-moment.
-
-=item UUID_FUNCTION
-
-By default Ora2Pg will convert calls to the SYS_GUID() Oracle function
-into calls to uuid_generate_v4 from the uuid-ossp extension. You can
-redefine it to use the gen_random_uuid function from the pgcrypto
-extension by changing the function name. Default is uuid_generate_v4.
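 
 In other words the converted code performs the following substitution, and the
 extension must exist on the PostgreSQL side (sketch):
 
 	CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
 	-- Oracle:      SYS_GUID()
 	-- PostgreSQL:  uuid_generate_v4()   -- or gen_random_uuid() via UUID_FUNCTION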
  
 =back
 
@@ -2081,11 +1944,11 @@
 	CREATE VIEW mviewname_mview AS
 	SELECT ... FROM ...;
 
-	SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the column to used for the index);
+	SELECT create_materialized_view('mviewname','mviewname_mview', change with the name of the colum to used for the index);
 
-The first argument is the name of the materialized view, the second the name of the view on which the materialized view is based
+The first argument is the name of the materializd view, the second the name of the view on which the materialized view is based
 and the third is the column name on which the index should be built (aka most of the time the primary key). This column is not
-automatically deduced so you need to replace its name.
+automatically deduced so you need to repace its name.
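 
 A concrete call could therefore look like this, using a hypothetical primary
 key column named id:
 
 	SELECT create_materialized_view('mviewname','mviewname_mview','id');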
 
 As said above Ora2Pg only supports snapshot materialized views so the table will be entirely refreshed by issuing first a truncate
 of the table and then by load again all data from the view:
@@ -2342,10 +2205,10 @@
         5 = difficult: stored functions and/or triggers with code rewriting
 
 This assessment consist in a letter A or B to specify if the migration needs
-manual rewriting or not. And a number from 1 up to 5 to give you a technical
+manual rewritting or not. And a number from 1 up to 5 to give you a technical
 difficulty level. You have an additional option --human_days_limit to specify
 the number of human-days limit where the migration level should be set to C
-to indicate that it need a huge amount of work and a full project management
+to indicate that it need a huge amount of work and a full project managment
 with migration support. Default is 10 human-days. You can use the configuration
 directive HUMAN_DAYS_LIMIT to change this default value permanently.
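 
 For example, such an assessment could be launched with (option names as
 documented, values illustrative):
 
 	ora2pg -c ora2pg.conf -t SHOW_REPORT --estimate_cost --human_days_limit 5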
 
@@ -2384,7 +2247,7 @@
    The CSV field separator must be a comma.
 
 It will generate a CSV file with the assessment result, one line per schema or
-database and a detailed HTML report for each database scanned.
+database and a detailled HTML report for each database scanned.
 
 Hint: Use the -t | --test option before to test all your connections in your
 CSV file.
@@ -2401,7 +2264,7 @@
 in the same Perl library lib/Ora2Pg/PLSQL.pm in the hash %UNCOVERED_SCORE initialization.
 
 This assessment method is a work in progress so I'm expecting feedbacks on migration
-experiences to polish the scores/units attributed in those variables.
+experiences to polish the scores/units attribued in those variables.
 
 =head2 Improving indexes and constraints creation speed
 
@@ -2432,7 +2295,7 @@
 
 need to be "translated" into a table using BLOB as follow:
 
-	CREATE TABLE test_blob (id NUMBER, c1 BLOB);
+	CREATE TABLE test_blob (id NUMNER, c1 BLOB);
 
 And then copy the data with the following INSERT query:
 
@@ -2442,29 +2305,6 @@
 directive) and to rename the new temporary table on the fly using the
 REPLACE_TABLES configuration directive.
 
-=head2 Global variables
-
-Oracle allows the use of global variables defined in packages. Ora2Pg will
-export these variables for PostgreSQL as user defined custom variables
-available in a session. Oracle variable assignments are exported as
-calls to:
-
-    PERFORM set_config('pkgname.varname', value, false);
-
-Use of these variables in the code is replaced by:
-
-    current_setting('pkgname.varname')::global_variables_type;
-
-where global_variables_type is the type of the variable extracted from
-the package definition.
-
-If the variable is a constant or has a default value assigned at
-declaration, ora2pg will create a file global_variables.conf with
-the definitions to include in the postgresql.conf file so that their
-values will already be set at database connection. Note that the
-value can always be modified by the user, so you can not have exactly
-a constant.
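 
 The generated file then contains plain custom-GUC settings, for instance
 (package and variable names below are hypothetical):
 
 	# global_variables.conf, to be included from postgresql.conf
 	mypkg.batch_size = '100'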
-
 =head2 Hints
 
 Converting your queries with Oracle style outer join (+) syntax to ANSI standard SQL at
@@ -2587,7 +2427,7 @@
 
 =head1 LICENSE
 
-Copyright (c) 2000-2017 Gilles Darold - All rights reserved.
+Copyright (c) 2000-2016 Gilles Darold - All rights reserved.
 
 	This program is free software: you can redistribute it and/or modify
 	it under the terms of the GNU General Public License as published by
@@ -2605,5 +2445,5 @@
 
 =head1 ACKNOWLEDGEMENT
 
-I must thanks a lot all the great contributors, see changelog for all acknowledgments.
+I must thanks a lot all the great contributors, see changelog for all acknowledgements.
 
diff -Nru ora2pg-18.0/doc/ora2pg.3 ora2pg-17.6/doc/ora2pg.3
--- ora2pg-18.0/doc/ora2pg.3	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/doc/ora2pg.3	2016-11-18 05:45:49.000000000 +0800
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pod::Man 2.25 (Pod::Simple 3.16)
+.\" Automatically generated by Pod::Man 2.27 (Pod::Simple 3.28)
 .\"
 .\" Standard preamble:
 .\" ========================================================================
@@ -38,6 +38,8 @@
 .    ds PI \(*p
 .    ds L" ``
 .    ds R" ''
+.    ds C`
+.    ds C'
 'br\}
 .\"
 .\" Escape single quotes in literal strings from groff's Unicode transform.
@@ -48,17 +50,24 @@
 .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
 .\" entries marked with X<> in POD.  Of course, you'll have to process the
 .\" output yourself in some meaningful fashion.
-.ie \nF \{\
-.    de IX
-.    tm Index:\\$1\t\\n%\t"\\$2"
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
 ..
-.    nr % 0
-.    rr F
-.\}
-.el \{\
-.    de IX
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+.    if \nF \{
+.        de IX
+.        tm Index:\\$1\t\\n%\t"\\$2"
 ..
+.        if !\nF==2 \{
+.            nr % 0
+.            nr F 2
+.        \}
+.    \}
 .\}
+.rr rF
 .\"
 .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
 .\" Fear.  Run.  Save yourself.  No user-serviceable parts.
@@ -124,7 +133,7 @@
 .\" ========================================================================
 .\"
 .IX Title "ORA2PG 1"
-.TH ORA2PG 1 "2017-01-29" "perl v5.14.2" "User Contributed Perl Documentation"
+.TH ORA2PG 1 "2016-11-16" "perl v5.18.2" "User Contributed Perl Documentation"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
@@ -147,10 +156,10 @@
 .IX Header "FEATURES"
 Ora2Pg consist of a Perl script (ora2pg) and a Perl module (Ora2Pg.pm), the
 only thing you have to modify is the configuration file ora2pg.conf by setting
-the \s-1DSN\s0 to the Oracle database and optionally the name of a schema. Once that's
+the \s-1DSN\s0 to the Oracle database and optionaly the name of a schema. Once that's
 done you just have to set the type of export you want: \s-1TABLE\s0 with constraints,
-\&\s-1VIEW\s0, \s-1MVIEW\s0, \s-1TABLESPACE\s0, \s-1SEQUENCE\s0, \s-1INDEXES\s0, \s-1TRIGGER\s0, \s-1GRANT\s0, \s-1FUNCTION\s0, \s-1PROCEDURE\s0,
-\&\s-1PACKAGE\s0, \s-1PARTITION\s0, \s-1TYPE\s0, \s-1INSERT\s0 or \s-1COPY\s0, \s-1FDW\s0, \s-1QUERY\s0, \s-1KETTLE\s0, \s-1SYNONYM\s0.
+\&\s-1VIEW, MVIEW, TABLESPACE, SEQUENCE, INDEXES, TRIGGER, GRANT, FUNCTION, PROCEDURE,
+PACKAGE, PARTITION, TYPE, INSERT\s0 or \s-1COPY, FDW, QUERY, KETTLE, SYNONYM.\s0
 .PP
 By default Ora2Pg exports to a file that you can load into PostgreSQL with the
 psql client, but you can also import directly into a PostgreSQL database by
@@ -194,7 +203,7 @@
 Ora2Pg do its best to automatically convert your Oracle database to PostgreSQL
 but there's still manual works to do. The Oracle specific \s-1PL/SQL\s0 code generated
 for functions, procedures, packages and triggers has to be reviewed to match
-the PostgreSQL syntax. You will find some useful recommendations on porting
+the PostgreSQL syntax. You will find some useful recommandations on porting
 Oracle \s-1PL/SQL\s0 code to PostgreSQL \s-1PL/PGSQL\s0 at \*(L"Converting from other Databases
 to PostgreSQL\*(R", section: Oracle (http://wiki.postgresql.org/wiki/Main_Page).
 .PP
@@ -202,7 +211,7 @@
 migration report.
 .SH "INSTALLATION"
 .IX Header "INSTALLATION"
-All Perl modules can always be found at \s-1CPAN\s0 (http://search.cpan.org/). Just
+All Perl modules can always be found at \s-1CPAN \s0(http://search.cpan.org/). Just
 type the full name of the module (ex: DBD::Oracle) into the search input box,
 it will brings you the page for download.
 .PP
@@ -216,15 +225,11 @@
 an already packaged DBD::Oracle easy to install.
 .SS "Requirement"
 .IX Subsection "Requirement"
-You need a modern Perl distribution (perl 5.10 and more). To connect to a
-database and proceed to his migration you need the \s-1DBI\s0 Perl module > 1.614.
-To migrate an Oracle database you need the DBD::Oracle Perl modules to be
-installed. To migrate a MySQL database you need the DBD::MySQL Perl modules.
-These modules are used to connect to the database but they are not mandatory
-if you want to migrate \s-1DDL\s0 input files.
-.PP
-To install DBD::Oracle and have it working you need to have the Oracle client
-libraries installed and the \s-1ORACLE_HOME\s0 environment variable must be defined.
+You need a modern Perl distribution (perl 5.10 and more), the \s-1DBI \s0> 1.614 and
+DBD::Oracle Perl modules to be installed. These are used to connect to the
+Oracle database. To install DBD::Oracle and have it working you need to have
+the Oracle client libraries installed and the \s-1ORACLE_HOME\s0 environment variable
+must be defined.
 .PP
 If you plan to export a MySQL database you need to install the Perl module
 DBD::mysql which require that the mysql client libraries are installed.
@@ -238,16 +243,16 @@
 client installed. If you prefer to load export 'on the fly', the perl module
 DBD::Pg is required.
 .PP
-Ora2Pg allow to dump all output in a compressed gzip file, to do that you need
+Ora2Pg allow to dump all output int a compressed gzip file, to do that you need
 the Compress::Zlib Perl module or if you prefer using bzip2 compression, the
-program bzip2 must be available in your \s-1PATH\s0.
+program bzip2 must be available in your \s-1PATH.\s0
 .SS "Installing Ora2Pg"
 .IX Subsection "Installing Ora2Pg"
 Like any other Perl Module Ora2Pg can be installed with the following commands:
 .PP
 .Vb 4
-\&        tar xzf ora2pg\-x.x.tar.gz
-\&        cd ora2pg\-x.x/
+\&        tar xzf ora2pg\-10.x.tar.gz
+\&        cd ora2pg\-10.x/
 \&        perl Makefile.PL
 \&        make && make install
 .Ve
@@ -268,11 +273,11 @@
 .IX Subsection "Packaging"
 If you want to build binary package for your preferred Linux distribution take a
 look at the packaging/ directory of the source tarball. There's everything to
-build \s-1RPM\s0, Slackware and Debian packages. See \s-1README\s0 file in that directory.
+build \s-1RPM,\s0 Slackware and Debian packages. See \s-1README\s0 file in that directory.
 .SS "Installing DBD::Oracle"
 .IX Subsection "Installing DBD::Oracle"
 Ora2Pg need perl module DBD::Oracle for connectivity to an Oracle database from
-perl \s-1DBI\s0. To get DBD::Oracle get it from \s-1CPAN\s0 a perl module repository.
+perl \s-1DBI.\s0 To get DBD::Oracle get it from \s-1CPAN\s0 a perl module repository.
 .PP
 After setting \s-1ORACLE_HOME\s0 and \s-1LD_LIBRARY_PATH\s0 environment variables as root
 user, install DBD::Oracle. Proceed as follow:
@@ -327,7 +332,7 @@
 set at the time they are read in the configuration file.
 .PP
 For configuration directives that just take a single value, you can use them
-multiple time in the configuration file but only the last occurrence found
+multiple time in the configuration file but only the last occurence found
 in the file will be used. For configuration directives that allow a list
 of value, you can use it multiple time, the values will be appended to the
 list. If you use the \s-1IMPORT\s0 directive to load a custom configuration file,
@@ -368,7 +373,6 @@
 \&    \-c | \-\-conf file  : Used to set an alternate configuration file than the
 \&                        default /etc/ora2pg/ora2pg.conf.
 \&    \-d | \-\-debug      : Enable verbose output.
-\&    \-D | \-\-data_type STR : Allow custom type replacement at command line.
 \&    \-e | \-\-exclude str: coma separated list of objects to exclude from export.
 \&                        Can be used with SHOW_COLUMN too.
 \&    \-h | \-\-help       : Print this short help.
@@ -437,7 +441,7 @@
     \*(L"\s-1WARNING:\s0 an error occurs during data export. Please check what's happen.\*(R"
 Most of the time this is an \s-1OOM\s0 issue, you might first reduce \s-1DATA_LIMIT\s0 value.
 .PP
-For developers, it is possible to add your own custom option(s) in the Perl
+For developpers, it is possible to add your own custom option(s) in the Perl
 script ora2pg as any configuration directive from ora2pg.conf can be passed
 in lower case to the new Ora2Pg object instance. See ora2pg code on how to
 add your own option.
@@ -516,7 +520,7 @@
 by the DBD::Oracle Perl module.
 .IP "\s-1ORACLE_DSN\s0" 4
 .IX Item "ORACLE_DSN"
-This directive is used to set the data source name in the form standard \s-1DBI\s0 \s-1DSN\s0.
+This directive is used to set the data source name in the form standard \s-1DBI DSN.\s0
 For example:
 .Sp
 .Vb 1
@@ -541,10 +545,10 @@
 .IP "\s-1USER_GRANTS\s0" 4
 .IX Item "USER_GRANTS"
 Set this directive to 1 if you connect the Oracle database as simple user and
-do not have enough grants to extract things from the \s-1DBA_\s0... tables. It will
-use tables \s-1ALL_\s0... instead.
+do not have enough grants to extract things from the \s-1DBA_...\s0 tables. It will
+use tables \s-1ALL_...\s0 instead.
 .Sp
-Warning: if you use export type \s-1GRANT\s0, you must set this configuration option
+Warning: if you use export type \s-1GRANT,\s0 you must set this configuration option
 to 0 or it will not works.
 .IP "\s-1TRANSACTION\s0" 4
 .IX Item "TRANSACTION"
@@ -560,18 +564,18 @@
 \&        committed: \*(AqSET TRANSACTION ISOLATION LEVEL READ COMMITTED\*(Aq,
 .Ve
 .Sp
-Releases before 6.2 used to set the isolation level to \s-1READ\s0 \s-1ONLY\s0 transaction
+Releases before 6.2 used to set the isolation level to \s-1READ ONLY\s0 transaction
 but in some case this was breaking data consistency so now default is set to
-\&\s-1SERIALIZABLE\s0.
+\&\s-1SERIALIZABLE.\s0
 .IP "\s-1INPUT_FILE\s0" 4
 .IX Item "INPUT_FILE"
 This directive did not control the Oracle database connection or unless it
 purely disable the use of any Oracle database by accepting a file as argument.
 Set this directive to a file containing \s-1PL/SQL\s0 Oracle Code like function,
 procedure or full package body to prevent Ora2Pg from connecting to an
-Oracle database and just apply his conversion tool to the content of the
-file. This can be used with the most of export types: \s-1TABLE\s0, \s-1TRIGGER\s0, \s-1PROCEDURE\s0,
-\&\s-1VIEW\s0, \s-1FUNCTION\s0 or \s-1PACKAGE\s0, etc.
+Oracle database and just apply his convertion tool to the content of the
+file. This can be used with the most of export types: \s-1TABLE, TRIGGER, PROCEDURE,
+VIEW, FUNCTION\s0 or \s-1PACKAGE,\s0 etc.
 .IP "\s-1ORA_INITIAL_COMMAND\s0" 4
 .IX Item "ORA_INITIAL_COMMAND"
 This directive can be used to send an initial command to Oracle, just after
@@ -619,7 +623,7 @@
 the error and your settings and try again. The perl script says nothing and the
 output file is empty: the user has not enough right to extract something from
 the database. Try to connect Oracle as super user or take a look at directive
-\&\s-1USER_GRANTS\s0 above and at next section, especially the \s-1SCHEMA\s0 directive.
+\&\s-1USER_GRANTS\s0 above and at next section, especiallly the \s-1SCHEMA\s0 directive.
 .IP "\s-1LOGFILE\s0" 4
 .IX Item "LOGFILE"
 By default all message are sent to the standard output. If you give a file path to that directive, all output will be appended to this file.
@@ -648,10 +652,10 @@
 export this schema and create all objects under this namespace, set the
 \&\s-1EXPORT_SCHEMA\s0 directive to 1. This will set the schema search_path at top of
 export \s-1SQL\s0 file to the schema name set in the \s-1SCHEMA\s0 directive with the default
-pg_catalog schema. If you want to change this path, use the directive \s-1PG_SCHEMA\s0.
+pg_catalog schema. If you want to change this path, use the directive \s-1PG_SCHEMA.\s0
 .IP "\s-1CREATE_SCHEMA\s0" 4
 .IX Item "CREATE_SCHEMA"
-Enable/disable the \s-1CREATE\s0 \s-1SCHEMA\s0 \s-1SQL\s0 order at starting of the output file.
+Enable/disable the \s-1CREATE SCHEMA SQL\s0 order at starting of the output file.
 It is enable by default and concern on \s-1TABLE\s0 export type.
 .IP "\s-1COMPILE_SCHEMA\s0" 4
 .IX Item "COMPILE_SCHEMA"
@@ -696,7 +700,7 @@
 .Sp
 You can also set the default search_path for the connected PostgreSQL user by
 using:
-	\s-1ALTER\s0 \s-1ROLE\s0 username \s-1SET\s0 search_path \s-1TO\s0 user_schema, public, pg_catalog;
+	\s-1ALTER ROLE\s0 username \s-1SET\s0 search_path \s-1TO\s0 user_schema, public, pg_catalog;
 .IP "\s-1SYSUSERS\s0" 4
 .IX Item "SYSUSERS"
 Without explicit schema, Ora2Pg will export all objects that not belongs to
@@ -735,10 +739,10 @@
 .IX Item "USE_TABLESPACE"
 When enabled this directive force ora2pg to export all tables, indexes constraint and
 indexes using the tablespace name defined in Oracle database. This works only with
-tablespace that are not \s-1TEMP\s0, \s-1USERS\s0 and \s-1SYSTEM\s0.
+tablespace that are not \s-1TEMP, USERS\s0 and \s-1SYSTEM.\s0
 .IP "\s-1WITH_OID\s0" 4
 .IX Item "WITH_OID"
-Activating this directive will force Ora2Pg to add \s-1WITH\s0 (\s-1OIDS\s0) when creating
+Activating this directive will force Ora2Pg to add \s-1WITH \s0(\s-1OIDS\s0) when creating
 tables or views as tables. Default is same as PostgreSQL, disabled.
 .SS "Export type"
 .IX Subsection "Export type"
@@ -780,8 +784,8 @@
 be registered.
 .Sp
 Some export type can not or should not be load directly into the PostgreSQL
-database and still require little manual editing. This is the case for \s-1GRANT\s0,
-\&\s-1TABLESPACE\s0, \s-1TRIGGER\s0, \s-1FUNCTION\s0, \s-1PROCEDURE\s0, \s-1TYPE\s0, \s-1QUERY\s0 and \s-1PACKAGE\s0 export types
+database and still require little manual editing. This is the case for \s-1GRANT,
+TABLESPACE, TRIGGER, FUNCTION, PROCEDURE, TYPE, QUERY\s0 and \s-1PACKAGE\s0 export types
 especially if you have \s-1PLSQL\s0 code or Oracle specific \s-1SQL\s0 in it.
 .Sp
 For \s-1TABLESPACE\s0 you must ensure that file path exist on the system and for
@@ -791,7 +795,7 @@
 Note that you can chained multiple export by giving to the \s-1TYPE\s0 directive a
 comma-separated list of export type.
 .Sp
-Ora2Pg will convert Oracle partition using table inheritance, trigger and
+Ora2Pg will convert Oracle partition using table inheritence, trigger and
 functions. See document at Pg site:
 http://www.postgresql.org/docs/current/interactive/ddl\-partitioning.html
 .Sp
@@ -856,7 +860,7 @@
 See http://pgxn.org/dist/oracle_fdw/ for more information on this foreign data
 wrapper.
 .Sp
-Release 10 adds a new export type destined to evaluate the content of the
+Release 10 adds a new export type destinated to evaluate the content of the
 database to migrate, in terms of objects and cost to end the migration:
 .Sp
 .Vb 1
@@ -869,10 +873,10 @@
 about Migration Cost Evaluation.
 .IP "\s-1ESTIMATE_COST\s0" 4
 .IX Item "ESTIMATE_COST"
-Activate the migration cost evaluation. Must only be used with \s-1SHOW_REPORT\s0,
-\&\s-1FUNCTION\s0, \s-1PROCEDURE\s0, \s-1PACKAGE\s0 and \s-1QUERY\s0 export type. Default is disabled.
-You may want to use the \-\-estimate_cost command line option instead to activate
-this functionality. Note that enabling this directive will force \s-1PLSQL_PGSQL\s0
+Activate the migration cost evaluation. Must only be used with \s-1SHOW_REPORT,
+FUNCTION, PROCEDURE, PACKAGE\s0 and \s-1QUERY\s0 export type. Default is disabled.
+You may wat to use the \-\-estimate_cost command line option instead to activate
+this functionnality. Note that enabling this directive will force \s-1PLSQL_PGSQL\s0
 activation.
 .IP "\s-1COST_UNIT_VALUE\s0" 4
 .IX Item "COST_UNIT_VALUE"
@@ -945,17 +949,17 @@
 This directive is used to defined the number of tables that will be processed
 in parallel for data extraction. The limit is the number of cores on your machine.
 Ora2Pg will open one database connection for each parallel table extraction.
-This directive, when upper than 1, will invalidate \s-1ORACLE_COPIES\s0 but not \s-1JOBS\s0,
-so the real number of process that will be used is \s-1PARALLEL_TABLES\s0 * \s-1JOBS\s0.
+This directive, when upper than 1, will invalidate \s-1ORACLE_COPIES\s0 but not \s-1JOBS,\s0
+so the real number of process that will be used is \s-1PARALLEL_TABLES\s0 * \s-1JOBS.\s0
 .Sp
 Note that this directive when set upper that 1 will also automatically enable
 the \s-1FILE_PER_TABLE\s0 directive if your are exporting to files.
 .IP "\s-1FDW_SERVER\s0" 4
 .IX Item "FDW_SERVER"
 This directive is used to set the name of the foreign data server that is used
-in the \*(L"\s-1CREATE\s0 \s-1SERVER\s0 name \s-1FOREIGN\s0 \s-1DATA\s0 \s-1WRAPPER\s0 oracle_fdw ...\*(R" command. This
-name will then be used in the \*(L"\s-1CREATE\s0 \s-1FOREIGN\s0 \s-1TABLE\s0 ...\*(R" \s-1SQL\s0 command. Default
-is arbitrary set to orcl. This only concern export type \s-1FDW\s0.
+in the \*(L"\s-1CREATE SERVER\s0 name \s-1FOREIGN DATA WRAPPER\s0 oracle_fdw ...\*(R" command. This
+name will then be used in the \*(L"\s-1CREATE FOREIGN TABLE ...\*(R" SQL\s0 command. Default
+is arbitrary set to orcl. This only concern export type \s-1FDW.\s0
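 .Sp
 For illustration (the server name matches the default above, the connection
 string is hypothetical), the PostgreSQL side could be prepared with:
 .Sp
 .Vb 2
 \&        CREATE EXTENSION oracle_fdw;
 \&        CREATE SERVER orcl FOREIGN DATA WRAPPER oracle_fdw OPTIONS (dbserver \*(Aq//dbhost:1521/ORCL\*(Aq);
 .Ve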
 .IP "\s-1EXTERNAL_TO_FDW\s0" 4
 .IX Item "EXTERNAL_TO_FDW"
 This directive, enabled by default, allow to export Oracle's External Tables as
@@ -964,7 +968,7 @@
 .IP "\s-1INTERNAL_DATE_MAX\s0" 4
 .IX Item "INTERNAL_DATE_MAX"
 Internal timestamp retrieves from custom type are extracted in the following
-format: 01\-JAN\-77 12.00.00.000000 \s-1AM\s0. It is impossible to know the exact century
+format: 01\-JAN\-77 12.00.00.000000 \s-1AM.\s0 It is impossible to know the exact century
 that must be used, so by default any year below 49 will be added to 2000
 and others to 1900. You can use this directive to change the default value 49.
 this is only relevant if you have user defined type with a column timestamp.
@@ -992,7 +996,7 @@
 \&        ALLOW           EMPLOYEES SALE_.* COUNTRIES .*_GEOM_SEQ
 .Ve
 .Sp
-will export objects with name \s-1EMPLOYEES\s0, \s-1COUNTRIES\s0, all objects beginning with
+will export objects with name \s-1EMPLOYEES, COUNTRIES,\s0 all objects begining with
 \&'\s-1SALE_\s0' and all objects with a name ending by '_GEOM_SEQ'. The object depends
 of the export type. Note that regex will not works with 8i database, you must
 use the % placeholder instead, Ora2Pg will use the \s-1LIKE\s0 operator.
@@ -1006,7 +1010,7 @@
 .Ve
 .Sp
 will limit export of trigger to those defined on table employees. If you want
-to extract all triggers but not some \s-1INSTEAD\s0 \s-1OF\s0 triggers:
+to extract all triggers but not some \s-1INSTEAD OF\s0 triggers:
 .Sp
 .Vb 1
 \&        ora2pg \-c ora2pg.conf \-t TRIGGER \-e \*(AqVIEW[trg_view_.*]\*(Aq
@@ -1020,7 +1024,7 @@
 .Ve
 .Sp
 This command will export the definition of the employee table but will exclude
-all index beginning with 'emp_' and the \s-1CHECK\s0 constraint called 'emp_salary_min'.
+all index begining with 'emp_' and the \s-1CHECK\s0 contraint called 'emp_salary_min'.
 .Sp
 When exporting partition you can exclude some partition tables by using
 .Sp
@@ -1028,7 +1032,7 @@
 \&        ora2pg \-p \-c ora2pg.conf \-t PARTITION \-e \*(AqPARTITION[PART_199.* PART_198.*]\*(Aq
 .Ve
 .Sp
-This will exclude partitioned tables for year 1980 to 1999 from the export but
+This will exclude partitionned tables for year 1980 to 1999 from the export but
 not the main partition table. The trigger will also be adapted to exclude those
 table.
 .Sp
@@ -1045,7 +1049,7 @@
 \&        ora2pg \-p \-c ora2pg.conf \-t GRANT \-a \*(AqGRANT[USER1 USER2]\*(Aq
 .Ve
 .Sp
-will limit export grants to users \s-1USER1\s0 and \s-1USER2\s0. But if you don't want to
+will limit export grants to users \s-1USER1\s0 and \s-1USER2.\s0 But if you don't want to
 export grants on some functions for these users, for example:
 .Sp
 .Vb 1
@@ -1063,7 +1067,7 @@
 \&        EXCLUDE         EMPLOYEES TMP_.* COUNTRIES
 .Ve
 .Sp
-will exclude object with name \s-1EMPLOYEES\s0, \s-1COUNTRIES\s0 and all tables beginning with
+will exclude object with name \s-1EMPLOYEES, COUNTRIES\s0 and all tables begining with
 \&'tmp_'.
 .Sp
 For example, you can ban from export some unwanted function with this directive:
@@ -1073,17 +1077,17 @@
 .Ve
 .Sp
 this example will exclude all functions, procedures or functions in a package
-with the name beginning with those regex. Note that regex will not works with
-8i database, you must use the % placeholder instead, Ora2Pg will use the \s-1NOT\s0
-\&\s-1LIKE\s0 operator.
+with the name begining with those regex. Note that regex will not works with
+8i database, you must use the % placeholder instead, Ora2Pg will use the \s-1NOT
+LIKE\s0 operator.
 .Sp
 See above (directive '\s-1ALLOW\s0') for the extended syntax.
 .IP "\s-1VIEW_AS_TABLE\s0" 4
 .IX Item "VIEW_AS_TABLE"
 Set which view to export as table. By default none. Value must be a list of
 view name or regexp separated by space or comma. If the object name is a view
-and the export type is \s-1TABLE\s0, the view will be exported as a create table
-statement. If export type is \s-1COPY\s0 or \s-1INSERT\s0, the corresponding data will be
+and the export type is \s-1TABLE,\s0 the view will be exported as a create table
+statement. If export type is \s-1COPY\s0 or \s-1INSERT,\s0 the corresponding data will be
 exported.
 .Sp
 See chapter \*(L"Exporting views as PostgreSQL table\*(R" for more details.
@@ -1142,7 +1146,7 @@
 .IX Item "REPLACE_QUERY"
 Sometime you may want to extract data from an Oracle table but you need a
 a custom query for that. Not just a \*(L"\s-1SELECT\s0 * \s-1FROM\s0 table\*(R" like Ora2Pg do
-but a more complex query. This directive allow you to overwrite the query
+but a more complexe query. This directive allow you to overwrite the query
 used by Ora2Pg to extract data. The format is TABLENAME[\s-1SQL_QUERY\s0].
 If you have multiple table to extract by replacing the Ora2Pg query, you can
 define multiple \s-1REPLACE_QUERY\s0 lines.
@@ -1150,122 +1154,12 @@
 .Vb 1
 \&        REPLACE_QUERY   EMPLOYEES[SELECT e.id,e.fisrtname,lastname FROM EMPLOYEES e JOIN EMP_UPDT u ON (e.id=u.id AND u.cdate>\*(Aq2014\-08\-01 00:00:00\*(Aq)]
 .Ve
-.SS "Controm of Full Text Search export"
-.IX Subsection "Controm of Full Text Search export"
-Several directives can be used to control the way Ora2Pg will export the
-Oracle's Text search indexes. By default \s-1CONTEXT\s0 indexes will be exported
-to PostgreSQL \s-1FTS\s0 indexes but \s-1CTXCAT\s0 indexes wikk be exported as indexes
-using the pg_trgm extension.
 .IP "\s-1CONTEXT_AS_TRGM\s0" 4
 .IX Item "CONTEXT_AS_TRGM"
 Force Ora2Pg to translate Oracle Text indexes into PostgreSQL indexes using
 pg_trgm extension. Default is to translate \s-1CONTEXT\s0 indexes into \s-1FTS\s0 indexes
 and \s-1CTXCAT\s0 indexes using pg_trgm. Most of the time using pg_trgm is enough,
-this is why this directive stand for. You need to create the pg_trgm extension
-into the destination database before importing the objects:
-.Sp
-.Vb 1
-\&        CREATE EXTENSION pg_trgm;
-.Ve
-.IP "\s-1FTS_INDEX_ONLY\s0" 4
-.IX Item "FTS_INDEX_ONLY"
-By default Ora2Pg creates a function-based index to translate Oracle Text
-indexes.
-.Sp
-.Vb 2
-\&        CREATE INDEX ON t_document
-\&                USING gin(to_tsvector(\*(Aqpg_catalog.french\*(Aq, title));
-.Ve
-.Sp
-You will have to rewrite the \s-1\fICONTAIN\s0()\fR clause using \fIto_tsvector()\fR, example:
-.Sp
-.Vb 2
-\&        SELECT id,title FROM t_document
-\&                WHERE to_tsvector(title)) @@ to_tsquery(\*(Aqsearch_word\*(Aq);
-.Ve
-.Sp
-To force Ora2Pg to create an extra tsvector column with a dedicated triggers
-for \s-1FTS\s0 indexes, disable this directive. In this case, Ora2Pg will add the
-column as follow: \s-1ALTER\s0 \s-1TABLE\s0 t_document \s-1ADD\s0 \s-1COLUMN\s0 tsv_title tsvector;
-Then update the column to compute \s-1FTS\s0 vectors if data have been loaded before
-	    \s-1UPDATE\s0 t_document \s-1SET\s0 tsv_title =
-		to_tsvector('pg_catalog.french', coalesce(title,''));
-To automatically update the column when a modification in the title column
-appears, Ora2Pg adds the following trigger:
-.Sp
-.Vb 12
-\&        CREATE FUNCTION tsv_t_document_title() RETURNS trigger AS $$
-\&        BEGIN
-\&               IF TG_OP = \*(AqINSERT\*(Aq OR new.title != old.title THEN
-\&                       new.tsv_title :=
-\&                       to_tsvector(\*(Aqpg_catalog.french\*(Aq, coalesce(new.title,\*(Aq\*(Aq));
-\&               END IF;
-\&               return new;
-\&        END
-\&        $$ LANGUAGE plpgsql;
-\&        CREATE TRIGGER trig_tsv_t_document_title BEFORE INSERT OR UPDATE
-\&         ON t_document
-\&         FOR EACH ROW EXECUTE PROCEDURE tsv_t_document_title();
-.Ve
-.Sp
-When the Oracle text index is defined over multiple column, Ora2Pg will use
-\&\fIsetweight()\fR to set a weight in the order of the column declaration.
-.IP "\s-1FTS_CONFIG\s0" 4
-.IX Item "FTS_CONFIG"
-Use this directive to force text search configuration to use. When it is not
-set, Ora2Pg will autodetect the stemmer used by Oracle for each index and
-pg_catalog.english if the information is not found.
-.IP "\s-1USE_UNACCENT\s0" 4
-.IX Item "USE_UNACCENT"
-If you want to perform your text search in an accent insensitive way, enable
-this directive. Ora2Pg will create an helper function over \fIunaccent()\fR and
-creates the pg_trgm indexes using this function. With \s-1FTS\s0 Ora2Pg will
-redefine your text search configuration, for example:
-.Sp
-.Vb 3
-\&      CREATE TEXT SEARCH CONFIGURATION fr (COPY = french); 
-\&      ALTER TEXT SEARCH CONFIGURATION fr
-\&              ALTER MAPPING FOR hword, hword_part, word WITH unaccent, french_stem;
-.Ve
-.Sp
-then set the \s-1FTS_CONFIG\s0 ora2pg.conf directive to fr instead of pg_catalog.english.
-.Sp
-When enabled, Ora2pg will create the wrapper function:
-.Sp
-.Vb 6
-\&      CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-\&      RETURNS text AS
-\&      $$
-\&          SELECT public.unaccent(\*(Aqpublic.unaccent\*(Aq, $1);
-\&      $$ LANGUAGE sql IMMUTABLE
-\&         COST 1;
-.Ve
-.Sp
-the indexes are exported as follow:
-.Sp
-.Vb 2
-\&      CREATE INDEX t_document_title_unaccent_trgm_idx ON t_document 
-\&          USING gin (unaccent_immutable(title) gin_trgm_ops);
-.Ve
-.Sp
-In your queries you will need to use the same function in the search to
-be able to use the function-based index. Example:
-.Sp
-.Vb 2
-\&        SELECT * FROM t_document
-\&                WHERE unaccent_immutable(title) LIKE \*(Aq%donnees%\*(Aq;
-.Ve
-.IP "\s-1USE_LOWER_UNACCENT\s0" 4
-.IX Item "USE_LOWER_UNACCENT"
-Same as above but call \fIlower()\fR in the \fIunaccent_immutable()\fR function:
-.Sp
-.Vb 5
-\&      CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-\&      RETURNS text AS
-\&      $$
-\&          SELECT lower(public.unaccent(\*(Aqpublic.unaccent\*(Aq, $1));
-\&      $$ LANGUAGE sql IMMUTABLE;
-.Ve
+this is why this directive stand for.
 .SS "Modifying object structure"
 .IX Subsection "Modifying object structure"
 One of the great usage of Ora2Pg is its flexibility to replicate Oracle database
@@ -1297,6 +1191,19 @@
 This will only extract columns 'id' and 'dossier' from table T_TEST1 and columns
 \&'id' and 'fichier' from the T_TEST2 table. This directive is only used with \s-1COPY\s0
 or \s-1INSERT\s0 export.
+.IP "\s-1MODIFY_TYPE\s0" 4
+.IX Item "MODIFY_TYPE"
+Some time you need to force the destination type, for example a column
+exported as timestamp by Ora2Pg can be forced into type date. Value is
+a comma-separated list of \s-1TABLE:COLUMN:TYPE\s0 structure. If you need to use
+comma or space inside type definition you will have to backslach them.
+.Sp
+.Vb 1
+\&        MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\e,6)
+.Ve
+.Sp
+Type of table1.col3 will be replaced by a varchar and table1.col4 by
+a decimal with precision.
 .IP "\s-1REPLACE_TABLES\s0" 4
 .IX Item "REPLACE_TABLES"
 This directive allow you to remap a list of Oracle table name to a PostgreSQL table name during export. The value is a list of space-separated values with the following structure:
@@ -1310,7 +1217,7 @@
 .IP "\s-1REPLACE_COLS\s0" 4
 .IX Item "REPLACE_COLS"
 Like table name, the name of the column can be remapped to a different name
-using the following syntax:
+using the following syntaxe:
 .Sp
 .Vb 1
 \&        REPLACE_COLS    ORIG_TBNAME(ORIG_COLNAME1:NEW_COLNAME1,ORIG_COLNAME2:NEW_COLNAME2)
@@ -1336,7 +1243,7 @@
 .Sp
 The values set in the boolean columns list will be replaced with the 't' and 'f'
 following the default replacement values and those additionally set in directive
-\&\s-1BOOLEAN_VALUES\s0.
+\&\s-1BOOLEAN_VALUES.\s0
 .Sp
 You can also give a type and a precision to automatically convert all fields of
 that type as a boolean. For example:
@@ -1360,8 +1267,8 @@
 Any values defined here will be added to the default list.
 .IP "\s-1REPLACE_ZERO_DATE\s0" 4
 .IX Item "REPLACE_ZERO_DATE"
-When Ora2Pg find a \*(L"zero\*(R" date: 0000\-00\-00 00:00:00 it is replaced by a \s-1NULL\s0.
-This could be a problem if your column is defined with \s-1NOT\s0 \s-1NULL\s0 constraint.
+When Ora2Pg find a \*(L"zero\*(R" date: 0000\-00\-00 00:00:00 it is replaced by a \s-1NULL.\s0
+This could be a problem if your column is defined with \s-1NOT NULL\s0 constraint.
 If you can not remove the constraint, use this directive to set an arbitral
 date that will be used instead. You can also use \-INFINITY if you don't want
 to use a fake date.
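 .Sp
 For example, to map zero dates to an infinitely old timestamp rather than an
 arbitrary date:
 .Sp
 .Vb 1
 \&        REPLACE_ZERO_DATE       \-INFINITY
 .Ve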
@@ -1392,7 +1299,7 @@
 does not use the standard \*(L"C\*(R" locale. If you enable, with value 1, this will
 force Ora2Pg to export all indexes defined on \fIvarchar2()\fR and \fIchar()\fR columns
 using those operators. If you set it to a value greater than 1 it will only
-change indexes on columns where the character limit is greater or equal than
+change indexes on columns where the charactere limit is greater or equal than
 this value. For example, set it to 128 to create these kind of indexes on
 columns of type varchar2(N) where N >= 128.
 .IP "\s-1PREFIX_PARTITION\s0" 4
@@ -1400,14 +1307,14 @@
 Enable this directive if you want that your partition table name will be
 exported using the parent table name. Disabled by default. If you have
 multiple partitioned table, when exported to PostgreSQL some partitions
-could have the same name but different parent tables. This is not allowed,
+could have the same name but dfferent parent tables. This is not allowed,
 table name must be unique.
 .IP "\s-1DISABLE_PARTITION\s0" 4
 .IX Item "DISABLE_PARTITION"
 If you don't want to reproduce the partitioning like in Oracle and want to
-export all partitioned Oracle data into the main single table in PostgreSQL
+export all partitionned Oracle data into the main single table in PostgreSQL
 enable this directive. Ora2Pg will export all data into the main table name.
-Default is to use partitioning, Ora2Pg will export data from each partition
+Default is to use partitionning, Ora2Pg will export data from each partition
 and import them into the PostgreSQL dedicated partition table.
 .SS "Oracle Spatial to PostGis"
 .IX Subsection "Oracle Spatial to PostGis"
@@ -1435,7 +1342,7 @@
 desired number of line to scan. The directive is enabled by default.
 .Sp
 For example, in the case of a column named shape and defined with Oracle type
-\&\s-1SDO_GEOMETRY\s0, with \s-1AUTODETECT_SPATIAL_TYPE\s0 disabled it will be converted as:
+\&\s-1SDO_GEOMETRY,\s0 with \s-1AUTODETECT_SPATIAL_TYPE\s0 disabled it will be converted as:
 .Sp
 .Vb 1
 \&    shape geometry(GEOMETRY) or shape geometry(GEOMETRYZ, 4326)
@@ -1451,16 +1358,16 @@
 with a two or three dimensional polygon.
 .IP "\s-1CONVERT_SRID\s0" 4
 .IX Item "CONVERT_SRID"
-This directive allow you to control the automatically conversion of Oracle
-\&\s-1SRID\s0 to standard \s-1EPSG\s0. If enabled, Ora2Pg will use the Oracle function
-sdo_cs.\fImap_oracle_srid_to_epsg()\fR to convert all \s-1SRID\s0. Enabled by default.
+This directive allow you to control the automatically convertion of Oracle
+\&\s-1SRID\s0 to standard \s-1EPSG.\s0 If enabled, Ora2Pg will use the Oracle function
+sdo_cs.\fImap_oracle_srid_to_epsg()\fR to convert all \s-1SRID.\s0 Enabled by default.
 .Sp
-If the \s-1SDO_SRID\s0 returned by Oracle is \s-1NULL\s0, it will be replaced by the
+If the \s-1SDO_SRID\s0 returned by Oracle is \s-1NULL,\s0 it will be replaced by the
 default value 8307 converted to its \s-1EPSG\s0 value: 4326 (see \s-1DEFAULT_SRID\s0).
 .Sp
 If the value is upper than 1, all \s-1SRID\s0 will be forced to this value, in
 this case \s-1DEFAULT_SRID\s0 will not be used when Oracle returns a null value
-and the value will be forced to \s-1CONVERT_SRID\s0.
+and the value will be forced to \s-1CONVERT_SRID.\s0
 .Sp
 Note that it is also possible to set the \s-1EPSG\s0 value on Oracle side when
 sdo_cs.\fImap_oracle_srid_to_epsg()\fR return \s-1NULL\s0 if your want to force the value:
@@ -1470,21 +1377,21 @@
 .Ve
 .IP "\s-1DEFAULT_SRID\s0" 4
 .IX Item "DEFAULT_SRID"
-Use this directive to override the default \s-1EPSG\s0 \s-1SRID\s0 to used: 4326.
-Can be overwritten by \s-1CONVERT_SRID\s0, see above.
+Use this directive to override the default \s-1EPSG SRID\s0 to used: 4326.
+Can be overwritten by \s-1CONVERT_SRID,\s0 see above.
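A minimal sketch of the two directives together, using the defaults described above (the values shown are only examples):

        CONVERT_SRID    1
        DEFAULT_SRID    4326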
 .IP "\s-1GEOMETRY_EXTRACT_TYPE\s0" 4
 .IX Item "GEOMETRY_EXTRACT_TYPE"
-This directive can take three values: \s-1WKT\s0 (default), \s-1WKB\s0 and \s-1INTERNAL\s0.
-When it is set to \s-1WKT\s0, Ora2Pg will use \s-1SDO_UTIL\s0.\fITO_WKTGEOMETRY()\fR to
-extract the geometry data. When it is set to \s-1WKB\s0, Ora2Pg will use the
-binary output using \s-1SDO_UTIL\s0.\fITO_WKBGEOMETRY()\fR. If those two extract type
-are calls at Oracle side, they are slow and you can easily reach Out Of
+This directive can take three values: \s-1WKT \s0(default), \s-1WKB\s0 and \s-1INTERNAL.\s0
+When it is set to \s-1WKT,\s0 Ora2Pg will use \s-1SDO_UTIL.\fITO_WKTGEOMETRY\s0()\fR to
+extract the geometry data. When it is set to \s-1WKB,\s0 Ora2Pg will use the
+binary output using \s-1SDO_UTIL.\fITO_WKBGEOMETRY\s0()\fR. If those two extract type
+are calles at Oracle side, they are slow and you can easily reach Out Of
 Memory when you have lot of rows. Also \s-1WKB\s0 is not able to export 3D geometry
-and some geometries like \s-1CURVEPOLYGON\s0. In this case you may use the \s-1INTERNAL\s0
+and some geometries like \s-1CURVEPOLYGON.\s0 In this case you may use the \s-1INTERNAL\s0
 extraction type. It will use a Pure Perl library to convert the \s-1SDO_GEOMETRY\s0
 data into a \s-1WKT\s0 representation, the translation is done on Ora2Pg side.
 This is a work in progress, please validate your exported data geometries
-before use. Default spatial object extraction type is \s-1INTERNAL\s0.
+before use. Default spatial object extraction type is \s-1INTERNAL.\s0
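For example, to request the Perl-side extraction described above (value shown purely for illustration):

        GEOMETRY_EXTRACT_TYPE    INTERNAL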
 .IP "\s-1POSTGIS_SCHEMA\s0" 4
 .IX Item "POSTGIS_SCHEMA"
 Use this directive to add a specific schema to the search path to look
@@ -1512,8 +1419,8 @@
 .IX Item "BLOB_LIMIT"
 When Ora2Pg detects a table with some \s-1BLOB\s0 it will automatically reduce the
 value of this directive by dividing it by 10 until its value is below 1000.
-You can control this value by setting \s-1BLOB_LIMIT\s0. Exporting \s-1BLOB\s0 use lot of
-resources, setting it to a too high value can produce \s-1OOM\s0.
+You can control this value by setting \s-1BLOB_LIMIT.\s0 Exporting \s-1BLOB\s0 use lot of
+ressources, setting it to a too high value can produce \s-1OOM.\s0
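A hypothetical setting that caps the number of rows fetched per pass when BLOBs are present (500 is only an example, tune it to your memory budget):

        BLOB_LIMIT    500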
 .IP "\s-1OUTPUT\s0" 4
 .IX Item "OUTPUT"
 The Ora2Pg output filename can be changed with this directive. Default value is
@@ -1523,7 +1430,7 @@
 is installed for the .bz2 extension.
 .IP "\s-1OUTPUT_DIR\s0" 4
 .IX Item "OUTPUT_DIR"
-Since release 7.0, you can define a base directory where the file will be written.
+Since release 7.0, you can define a base directory where wfile will be written.
 The directory must exist.
 .IP "\s-1BZIP2\s0" 4
 .IX Item "BZIP2"
@@ -1532,7 +1439,7 @@
 .IP "\s-1FILE_PER_CONSTRAINT\s0" 4
 .IX Item "FILE_PER_CONSTRAINT"
 Allow object constraints to be saved in a separate file during schema export.
-The file will be named \s-1CONSTRAINTS_OUTPUT\s0, where \s-1OUTPUT\s0 is the value of the
+The file will be named \s-1CONSTRAINTS_OUTPUT,\s0 where \s-1OUTPUT\s0 is the value of the
 corresponding configuration directive. You can use .gz xor .bz2 extension to
 enable compression. Default is to save all data in the \s-1OUTPUT\s0 file. This
 directive is usable only with \s-1TABLE\s0 export type.
@@ -1542,11 +1449,11 @@
 .IP "\s-1FILE_PER_INDEX\s0" 4
 .IX Item "FILE_PER_INDEX"
 Allow indexes to be saved in a separate file during schema export. The file
-will be named \s-1INDEXES_OUTPUT\s0, where \s-1OUTPUT\s0 is the value of the corresponding
+will be named \s-1INDEXES_OUTPUT,\s0 where \s-1OUTPUT\s0 is the value of the corresponding
 configuration directive. You can use .gz xor .bz2 file extension to enable
 compression. Default is to save all data in the \s-1OUTPUT\s0 file. This directive
-is usable only with \s-1TABLE\s0 \s-1AND\s0 \s-1TABLESPACE\s0 export type. With the \s-1TABLESPACE\s0
-export, it is used to write \*(L"\s-1ALTER\s0 \s-1INDEX\s0 ... \s-1TABLESPACE\s0 ...\*(R" into a separate
+is usable only with \s-1TABLE AND TABLESPACE\s0 export type. With the \s-1TABLESPACE\s0
+export, it is used to write \*(L"\s-1ALTER INDEX ... TABLESPACE ...\*(R"\s0 into a separate
 file named \s-1TBSP_INDEXES_OUPUT\s0 that can be loaded at end of the migration after
 the indexes creation to move the indexes.
 .Sp
@@ -1576,14 +1483,14 @@
 packagename_OUTPUT, where \s-1OUTPUT\s0 is the value of the corresponding directive.
 .IP "\s-1TRUNCATE_TABLE\s0" 4
 .IX Item "TRUNCATE_TABLE"
-If this directive is set to 1, a \s-1TRUNCATE\s0 \s-1TABLE\s0 instruction will be add before
+If this directive is set to 1, a \s-1TRUNCATE TABLE\s0 instruction will be add before
 loading data. This is usable only during \s-1INSERT\s0 or \s-1COPY\s0 export type.
 .Sp
 When activated, the instruction will be added only if there's no global \s-1DELETE\s0
 clause nor one specific to the current table (see below).
 .IP "\s-1DELETE\s0" 4
 .IX Item "DELETE"
-Support for include a \s-1DELETE\s0 \s-1FROM\s0 ... \s-1WHERE\s0 clause filter before importing
+Support for include a \s-1DELETE FROM ... WHERE\s0 clause filter before importing
 data and perform a delete of some lines instead of truncating tables.
 Value is constructed as follows: TABLE_NAME[\s-1DELETE_WHERE_CLAUSE\s0], or
 if you have only one where clause for all tables just put the delete
@@ -1603,12 +1510,12 @@
 These \s-1DELETE\s0 clauses might be useful with regular \*(L"updates\*(R".
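To make the two directives concrete, here is a hypothetical combination (the table name and WHERE clause are invented for the example):

        TRUNCATE_TABLE    1
        DELETE            EMPLOYEES[status = 'ARCHIVED']

With such a per-table DELETE clause present, the TRUNCATE TABLE statement would not be emitted for EMPLOYEES.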
 .IP "\s-1STOP_ON_ERROR\s0" 4
 .IX Item "STOP_ON_ERROR"
-Set this parameter to 0 to not include the call to \eset \s-1ON_ERROR_STOP\s0 \s-1ON\s0 in
+Set this parameter to 0 to not include the call to \eset \s-1ON_ERROR_STOP ON\s0 in
 all \s-1SQL\s0 scripts generated by Ora2Pg. By default this order is always present
-so that the script will immediately abort when an error is encountered.
+so that the script will immediatly abort when an error is encountered.
 .IP "\s-1COPY_FREEZE\s0" 4
 .IX Item "COPY_FREEZE"
-Enable this directive to use \s-1COPY\s0 \s-1FREEZE\s0 instead of a simple \s-1COPY\s0 to
+Enable this directive to use \s-1COPY FREEZE\s0 instead of a simple \s-1COPY\s0 to
 export data with rows already frozen. This is intended as a performance
 option for initial data loading. Rows will be frozen only if the table
 being loaded has been created or truncated in the current subtransaction.
@@ -1642,129 +1549,9 @@
 off to disable synchronous commit to gain speed at writing data. Some modified
 version of PostgreSQL, like greenplum, do not have this setting, so in this
 case set this directive to 1 and ora2pg will not try to change the setting.
-.SS "Column tytpe control"
-.IX Subsection "Column tytpe control"
-.IP "\s-1PG_NUMERIC_TYPE\s0" 4
-.IX Item "PG_NUMERIC_TYPE"
-If set to 1 replace portable numeric type into PostgreSQL internal type.
-Oracle data type \s-1NUMBER\s0(p,s) is approximatively converted to real and
-float PostgreSQL data type. If you have monetary fields or don't want
-rounding issues with the extra decimals you should preserve the same
-numeric(p,s) PostgreSQL data type. Do that only if you need very good
-precision because using numeric(p,s) is slower than using real or double.
-.IP "\s-1PG_INTEGER_TYPE\s0" 4
-.IX Item "PG_INTEGER_TYPE"
-If set to 1 replace portable numeric type into PostgreSQL internal type.
-Oracle data type \s-1NUMBER\s0(p) or \s-1NUMBER\s0 are converted to smallint, integer
-or bigint PostgreSQL data type following the length of the precision. If
-\&\s-1NUMBER\s0 without precision are set to \s-1DEFAULT_NUMERIC\s0 (see bellow).
-.IP "\s-1DEFAULT_NUMERIC\s0" 4
-.IX Item "DEFAULT_NUMERIC"
-\&\s-1NUMBER\s0 without precision are converted by default to bigint only if
-\&\s-1PG_INTEGER_TYPE\s0 is true. You can overwrite this value to any \s-1PG\s0 type,
-like integer or float.
-.IP "\s-1DATA_TYPE\s0" 4
-.IX Item "DATA_TYPE"
-If you're experiencing any problem in data type schema conversion with this
-directive you can take full control of the correspondence between Oracle and
-PostgreSQL types to redefine data type translation used in Ora2pg. The syntax
-is a comma-separated list of \*(L"Oracle datatype:Postgresql datatype\*(R". Here are
-the default list used:
-.Sp
-.Vb 1
-\&        DATA_TYPE       DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
-.Ve
-.Sp
-Note that the directive and the list definition must be a single line.
-.Sp
-If you want to replace a type with a precision and scale you need to escape
-the coma with a backslash. For example, if you want to replace all \s-1NUMBER\s0(*,0)
-into bigint instead of numeric(38) add the following:
-.Sp
-.Vb 1
-\&       DATA_TYPE       NUMBER(*\e,0):bigint
-.Ve
-.Sp
-You don't have to recopy all default type conversion but just the one you want
-to rewrite.
-.Sp
-There's a special case with \s-1BFILE\s0 when they are converted to type \s-1TEXT\s0, they
-will just contains the full path to the external file. If you set the
-destination type to \s-1BYTEA\s0, the default, Ora2Pg will export the content of the
-\&\s-1BFILE\s0 as bytea. The third case is when you set the destination type to \s-1EFILE\s0,
-in this case, Ora2Pg will export it as an \s-1EFILE\s0 record: (\s-1DIRECTORY\s0, \s-1FILENAME\s0).
-Use the \s-1DIRECTORY\s0 export type to export the existing directories as well as
-privileges on those directories.
-.Sp
-There's no \s-1SQL\s0 function available to retrieve the path to the \s-1BFILE\s0, then Ora2Pg
-have to create one using the \s-1DBMS_LOB\s0 package.
-.Sp
-.Vb 10
-\&        CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
-\&        RETURN VARCHAR2
-\&          AS
-\&            l_dir   VARCHAR2(4000);
-\&            l_fname VARCHAR2(4000);
-\&            l_path  VARCHAR2(4000);
-\&          BEGIN
-\&            dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-\&            SELECT directory_path INTO l_path FROM all_directories
-\&                WHERE directory_name = l_dir;
-\&            l_dir := rtrim(l_path,\*(Aq/\*(Aq);
-\&            RETURN l_dir || \*(Aq/\*(Aq || l_fname;
-\&          END;
-.Ve
-.Sp
-This function is only created if Ora2Pg found a table with a \s-1BFILE\s0 column and
-that the destination type is \s-1TEXT\s0. The function is dropped at the end of the
-export. This concern both, \s-1COPY\s0 and \s-1INSERT\s0 export type.
-.Sp
-There's no \s-1SQL\s0 function available to retrieve \s-1BFILE\s0 as an \s-1EFILE\s0 record, then
-Ora2Pg have to create one using the \s-1DBMS_LOB\s0 package.
-.Sp
-.Vb 9
-\&        CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
-\&        RETURN VARCHAR2
-\&          AS
-\&            l_dir   VARCHAR2(4000);
-\&            l_fname VARCHAR2(4000);
-\&          BEGIN
-\&            dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
-\&            RETURN \*(Aq(\*(Aq || l_dir || \*(Aq,\*(Aq || l_fnamei || \*(Aq)\*(Aq;
-\&          END;
-.Ve
-.Sp
-This function is only created if Ora2Pg found a table with a \s-1BFILE\s0 column and
-that the destination type is \s-1EFILE\s0. The function is dropped at the end of the
-export. This concern both, \s-1COPY\s0 and \s-1INSERT\s0 export type.
-.Sp
-To set the destination type, use the \s-1DATA_TYPE\s0 configuration directive:
-.Sp
-.Vb 1
-\&        DATA_TYPE       BFILE:EFILE
-.Ve
-.Sp
-for example.
-.Sp
-The \s-1EFILE\s0 type is a user defined type created by the PostgreSQL extension
-external_file that can be found here: https://github.com/darold/external_file
-This is a port of the \s-1BFILE\s0 Oracle type to PostgreSQL.
-.IP "\s-1MODIFY_TYPE\s0" 4
-.IX Item "MODIFY_TYPE"
-Some time you need to force the destination type, for example a column
-exported as timestamp by Ora2Pg can be forced into type date. Value is
-a comma-separated list of \s-1TABLE:COLUMN:TYPE\s0 structure. If you need to use
-comma or space inside type definition you will have to backslash them.
-.Sp
-.Vb 1
-\&        MODIFY_TYPE     TABLE1:COL3:varchar,TABLE1:COL4:decimal(9\e,6)
-.Ve
-.Sp
-Type of table1.col3 will be replaced by a varchar and table1.col4 by
-a decimal with precision.
 .SS "Taking export under control"
 .IX Subsection "Taking export under control"
-The following other configuration directives interact directly with the export process and give you fine granularity in database export control.
+The following other configuration directives interact directly with the export process and give you fine granuality in database export control.
 .IP "\s-1SKIP\s0" 4
 .IX Item "SKIP"
 For \s-1TABLE\s0 export you may not want to export all schema constraints, the \s-1SKIP\s0
@@ -1800,14 +1587,14 @@
 primary key names set this option to 1.
 .IP "\s-1FKEY_ADD_UPDATE\s0" 4
 .IX Item "FKEY_ADD_UPDATE"
-This directive allow you to add an \s-1ON\s0 \s-1UPDATE\s0 \s-1CASCADE\s0 option to a foreign
-key when a \s-1ON\s0 \s-1DELETE\s0 \s-1CASCADE\s0 is defined or always. Oracle do not support
-this feature, you have to use trigger to operate the \s-1ON\s0 \s-1UPDATE\s0 \s-1CASCADE\s0.
+This directive allow you to add an \s-1ON UPDATE CASCADE\s0 option to a foreign
+key when a \s-1ON DELETE CASCADE\s0 is defined or always. Oracle do not support
+this feature, you have to use trigger to operate the \s-1ON UPDATE CASCADE.\s0
 As PostgreSQL has this feature, you can choose how to add the foreign
 key option. There are three values for this directive: never, the default,
 which means that foreign keys will be declared exactly like in Oracle.
-The second value is delete, that mean that the \s-1ON\s0 \s-1UPDATE\s0 \s-1CASCADE\s0 option
-will be added only if the \s-1ON\s0 \s-1DELETE\s0 \s-1CASCADE\s0 is already defined on the
+The second value is delete, that mean that the \s-1ON UPDATE CASCADE\s0 option
+will be added only if the \s-1ON DELETE CASCADE\s0 is already defined on the
 foreign Keys. The last value, always, will force all foreign keys to be
 defined using the update option.
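For instance, to add ON UPDATE CASCADE only where an ON DELETE CASCADE already exists (one of the three documented values):

        FKEY_ADD_UPDATE    delete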
 .IP "\s-1FKEY_DEFERRABLE\s0" 4
@@ -1831,7 +1618,7 @@
 (\s-1TABLE\s0 export type).
 .IP "\s-1DROP_FKEY\s0" 4
 .IX Item "DROP_FKEY"
-If deferring foreign keys is not possible due to the amount of data in a
+If deferring foreign keys is not possible du to the amount of data in a
 single transaction, you've not exported foreign keys as deferrable or you
 are using direct import to PostgreSQL, you can use the \s-1DROP_FKEY\s0 directive.
 .Sp
@@ -1846,13 +1633,13 @@
 .IP "\s-1DISABLE_TRIGGERS\s0" 4
 .IX Item "DISABLE_TRIGGERS"
 This directive is used to disable triggers on all tables in \s-1COPY\s0 or \s-1INSERT\s0
-export modes. Available values are \s-1USER\s0 (disable user-defined triggers only)
-and \s-1ALL\s0 (includes \s-1RI\s0 system triggers). Default is 0: do not add \s-1SQL\s0 statements
+export modes. Available values are \s-1USER \s0(disable user-defined triggers only)
+and \s-1ALL \s0(includes \s-1RI\s0 system triggers). Default is 0: do not add \s-1SQL\s0 statements
 to disable trigger before data import.
 .Sp
 If you want to disable triggers during data migration, set the value to
 \&\s-1USER\s0 if you are connected as a non-superuser and \s-1ALL\s0 if you are connected
-as PostgreSQL superuser. A value of 1 is equal to \s-1USER\s0.
+as PostgreSQL superuser. A value of 1 is equal to \s-1USER.\s0
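A plausible setting when loading data as a non-superuser (value taken from the list above):

        DISABLE_TRIGGERS    USER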
 .IP "\s-1DISABLE_SEQUENCE\s0" 4
 .IX Item "DISABLE_SEQUENCE"
 If set to 1 disables alter of sequences on all tables during \s-1COPY\s0 or \s-1INSERT\s0 export
@@ -1874,25 +1661,119 @@
 set to 0. This is the exact behavior of the same option in PostgreSQL.
 This directive is only used during data export to build \s-1INSERT\s0 statements.
 See \s-1NOESCAPE\s0 for enabling/disabling escape in \s-1COPY\s0 statements.
+.IP "\s-1PG_NUMERIC_TYPE\s0" 4
+.IX Item "PG_NUMERIC_TYPE"
+If set to 1 replace portable numeric type into PostgreSQL internal type.
+Oracle data type \s-1NUMBER\s0(p,s) is approximatively converted to real and
+float PostgreSQL data type. If you have monetary fields or don't want
+rounding issues with the extra decimals you should preserve the same
+numeric(p,s) PostgreSQL data type. Do that only if you need very good
+precision because using numeric(p,s) is slower than using real or double.
+.IP "\s-1PG_INTEGER_TYPE\s0" 4
+.IX Item "PG_INTEGER_TYPE"
+If set to 1 replace portable numeric type into PostgreSQL internal type.
+Oracle data type \s-1NUMBER\s0(p) or \s-1NUMBER\s0 are converted to smallint, integer
+or bigint PostgreSQL data type following the length of the precision. If
+\&\s-1NUMBER\s0 without precision are set to \s-1DEFAULT_NUMERIC \s0(see bellow).
+.IP "\s-1DEFAULT_NUMERIC\s0" 4
+.IX Item "DEFAULT_NUMERIC"
+\&\s-1NUMBER\s0 without precision are converted by default to bigint only if
+\&\s-1PG_INTEGER_TYPE\s0 is true. You can overwrite this value to any \s-1PG\s0 type,
+like integer or float.
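A sketch combining the three directives above; the bigint fallback is only one example of a permitted value:

        PG_NUMERIC_TYPE    1
        PG_INTEGER_TYPE    1
        DEFAULT_NUMERIC    bigint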
+.IP "\s-1DATA_TYPE\s0" 4
+.IX Item "DATA_TYPE"
+If you're experiencing any problem in data type schema conversion with this
+directive you can take full control of the correspondence between Oracle and
+PostgreSQL types to redefine data type translation used in Ora2pg. The syntax
+is a comma-separated list of \*(L"Oracle datatype:Postgresql datatype\*(R". Here are
+the default list used:
+.Sp
+.Vb 1
+\&        DATA_TYPE       DATE:timestamp,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:numeric,INTEGER:numeric,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,XMLTYPE:xml,BINARY_INTEGER:integer,PLS_INTEGER:integer,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp with time zone
+.Ve
+.Sp
+Note that the directive and the list definition must be a single line.
+.Sp
+There's a special case with \s-1BFILE\s0 when they are converted to type \s-1TEXT,\s0 they
+will just contains the full path to the external file. If you set the
+destination type to \s-1BYTEA,\s0 the default, Ora2Pg will export the content of the
+\&\s-1BFILE\s0 as bytea. The third case is when you set the destination type to \s-1EFILE,\s0
+in this case, Ora2Pg will export it as an \s-1EFILE\s0 record: (\s-1DIRECTORY, FILENAME\s0).
+Use the \s-1DIRECTORY\s0 export type to export the existing directories as well as
+priviledge on those directories.
+.Sp
+There's no \s-1SQL\s0 function available to retrieve the path to the \s-1BFILE,\s0 then Ora2Pg
+have to create one using the \s-1DBMS_LOB\s0 package.
+.Sp
+.Vb 10
+\&        CREATE OR REPLACE FUNCTION ora2pg_get_bfilename( p_bfile IN BFILE )
+\&        RETURN VARCHAR2
+\&          AS
+\&            l_dir   VARCHAR2(4000);
+\&            l_fname VARCHAR2(4000);
+\&            l_path  VARCHAR2(4000);
+\&          BEGIN
+\&            dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+\&            SELECT directory_path INTO l_path FROM all_directories
+\&                WHERE directory_name = l_dir;
+\&            l_dir := rtrim(l_path,\*(Aq/\*(Aq);
+\&            RETURN l_dir || \*(Aq/\*(Aq || l_fname;
+\&          END;
+.Ve
+.Sp
+This function is only created if Ora2Pg found a table with a \s-1BFILE\s0 column and
+that the destination type is \s-1TEXT.\s0 The function is dropped at the end of the
+export. This concern both, \s-1COPY\s0 and \s-1INSERT\s0 export type.
+.Sp
+There's no \s-1SQL\s0 function available to retrieve \s-1BFILE\s0 as an \s-1EFILE\s0 record, then
+Ora2Pg have to create one using the \s-1DBMS_LOB\s0 package.
+.Sp
+.Vb 9
+\&        CREATE OR REPLACE FUNCTION ora2pg_get_efile( p_bfile IN BFILE )
+\&        RETURN VARCHAR2
+\&          AS
+\&            l_dir   VARCHAR2(4000);
+\&            l_fname VARCHAR2(4000);
+\&          BEGIN
+\&            dbms_lob.FILEGETNAME( p_bfile, l_dir, l_fname );
+\&            RETURN \*(Aq(\*(Aq || l_dir || \*(Aq,\*(Aq || l_fnamei || \*(Aq)\*(Aq;
+\&          END;
+.Ve
+.Sp
+This function is only created if Ora2Pg found a table with a \s-1BFILE\s0 column and
+that the destination type is \s-1EFILE.\s0 The function is dropped at the end of the
+export. This concern both, \s-1COPY\s0 and \s-1INSERT\s0 export type.
+.Sp
+To set the destination type, use the \s-1DATA_TYPE\s0 configuration directive:
+.Sp
+.Vb 1
+\&        DATA_TYPE       BFILE:EFILE
+.Ve
+.Sp
+for example.
+.Sp
+The \s-1EFILE\s0 type is a user defined type created by the PostgreSQL extension
+external_file that can be found here: https://github.com/darold/external_file
+This is a port of the \s-1BFILE\s0 Oracle type to PostgreSQL.
 .IP "\s-1TRIM_TYPE\s0" 4
 .IX Item "TRIM_TYPE"
 If you want to convert \s-1CHAR\s0(n) from Oracle into varchar(n) or text on PostgreSQL
-using directive \s-1DATA_TYPE\s0, you might want to do some triming on the data. By
-default Ora2Pg will auto-detect this conversion and remove any whitespace at both
+using directive \s-1DATA_TYPE,\s0 you might want to do some triming on the data. By
+default Ora2Pg will auto-detect this conversion and remove any withspace at both
 leading and trailing position. If you just want to remove the leadings character
-set the value to \s-1LEADING\s0. If you just want to remove the trailing character, set
-the value to \s-1TRAILING\s0. Default value is \s-1BOTH\s0.
+set the value to \s-1LEADING.\s0 If you just want to remove the trailing character, set
+the value to \s-1TRAILING.\s0 Default value is \s-1BOTH.\s0
 .IP "\s-1TRIM_CHAR\s0" 4
 .IX Item "TRIM_CHAR"
-The default trimming character is space, use this directive if you need to
+The default triming character is space, use this directive if you need to
 change the character that will be removed. For example, set it to \- if you
-have leading \- in the char(n) field. To use space as trimming charger, comment
+have leading \- in the char(n) field. To use space as triming charger, comment
 this directive, this is the default value.
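For example, to strip only leading dashes from CHAR(n) data (both values are illustrative):

        TRIM_TYPE    LEADING
        TRIM_CHAR    -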
 .IP "\s-1PRESERVE_CASE\s0" 4
 .IX Item "PRESERVE_CASE"
 If you want to preserve the case of Oracle object name set this directive to 1.
 By default Ora2Pg will convert all Oracle object names to lower case.  I do not
-recommend to enable this unless you will always have to double-quote object
+recommand to enable this unless you will always have to double-quote object
 names on all your \s-1SQL\s0 scripts.
 .IP "\s-1ORA_RESERVED_WORDS\s0" 4
 .IX Item "ORA_RESERVED_WORDS"
@@ -1909,26 +1790,26 @@
 .IP "\s-1PG_SUPPORTS_MVIEW\s0" 4
 .IX Item "PG_SUPPORTS_MVIEW"
 Since PostgreSQL 9.3, materialized view are supported with the \s-1SQL\s0 syntax
-\&'\s-1CREATE\s0 \s-1MATERIALIZED\s0 \s-1VIEW\s0'. To force Ora2Pg to use the native PostgreSQL
+\&'\s-1CREATE MATERIALIZED VIEW\s0'. To force Ora2Pg to use the native PostgreSQL
 support you must enable this configuration \- enable by default. If you want
 to use the old style with table and a set of function, you should disable it.
 .IP "\s-1PG_SUPPORTS_IFEXISTS\s0" 4
 .IX Item "PG_SUPPORTS_IFEXISTS"
-PostgreSQL version below 9.x do not support \s-1IF\s0 \s-1EXISTS\s0 in \s-1DDL\s0 statements.
+PostgreSQL version below 9.x do not support \s-1IF EXISTS\s0 in \s-1DDL\s0 statements.
 Disabling the directive with value 0 will prevent Ora2Pg to add those
-keywords in all generated statements. Default value is 1, enabled.
-.IP "\s-1PG_SUPPORTS_ROLE\s0 (Deprecated)" 4
+keywords in all generated statments. Default value is 1, enabled.
+.IP "\s-1PG_SUPPORTS_ROLE \s0(Deprecated)" 4
 .IX Item "PG_SUPPORTS_ROLE (Deprecated)"
 This option is deprecated since Ora2Pg release v7.3.
 .Sp
 By default Oracle roles are translated into PostgreSQL groups. If you have
 PostgreSQL 8.1 or more consider the use of \s-1ROLES\s0 and set this directive to 1
 to export roles.
-.IP "\s-1PG_SUPPORTS_INOUT\s0 (Deprecated)" 4
+.IP "\s-1PG_SUPPORTS_INOUT \s0(Deprecated)" 4
 .IX Item "PG_SUPPORTS_INOUT (Deprecated)"
 This option is deprecated since Ora2Pg release v7.3.
 .Sp
-If set to 0, all \s-1IN\s0, \s-1OUT\s0 or \s-1INOUT\s0 parameters will not be used into the generated
+If set to 0, all \s-1IN, OUT\s0 or \s-1INOUT\s0 parameters will not be used into the generated
 PostgreSQL function declarations (disable it for PostgreSQL database version
 lower than 8.1). This is now enabled by default.
 .IP "\s-1PG_SUPPORTS_DEFAULT\s0" 4
@@ -1936,25 +1817,25 @@
 This directive enable or disable the use of default parameter value in function
 export. Until PostgreSQL 8.4 such a default value was not supported, this feature
 is now enabled by default.
-.IP "\s-1PG_SUPPORTS_WHEN\s0 (Deprecated)" 4
+.IP "\s-1PG_SUPPORTS_WHEN \s0(Deprecated)" 4
 .IX Item "PG_SUPPORTS_WHEN (Deprecated)"
 Add support to \s-1WHEN\s0 clause on triggers as PostgreSQL v9.0 now supports it. This
 directive is enabled by default, set it to 0 to disable this feature.
-.IP "\s-1PG_SUPPORTS_INSTEADOF\s0 (Deprecated)" 4
+.IP "\s-1PG_SUPPORTS_INSTEADOF \s0(Deprecated)" 4
 .IX Item "PG_SUPPORTS_INSTEADOF (Deprecated)"
-Add support to \s-1INSTEAD\s0 \s-1OF\s0 usage on triggers (used with \s-1PG\s0 >= 9.1), if this
-directive is disabled the \s-1INSTEAD\s0 \s-1OF\s0 triggers will be rewritten as Pg rules.
+Add support to \s-1INSTEAD OF\s0 usage on triggers (used with \s-1PG \s0>= 9.1), if this
+directive is disabled the \s-1INSTEAD OF\s0 triggers will be rewritten as Pg rules.
 .IP "\s-1PG_SUPPORTS_CHECKOPTION\s0" 4
 .IX Item "PG_SUPPORTS_CHECKOPTION"
-When enabled, export views with \s-1CHECK\s0 \s-1OPTION\s0. Disable it if you have PostgreSQL
+When enabled, export views with \s-1CHECK OPTION.\s0 Disable it if you have PostgreSQL
 version prior to 9.4. Default: 1, enabled.
 .IP "\s-1PG_SUPPORTS_IFEXISTS\s0" 4
 .IX Item "PG_SUPPORTS_IFEXISTS"
-If disabled, do not export object with \s-1IF\s0 \s-1EXISTS\s0 statements.
+If disabled, do not export object with \s-1IF EXISTS\s0 statements.
 Enabled by default.
 .IP "\s-1BITMAP_AS_GIN\s0" 4
 .IX Item "BITMAP_AS_GIN"
-Use btree_gin extension to create bitmap like index with pg >= 9.4
+Use btree_gin extenstion to create bitmap like index with pg >= 9.4
 You will need to create the extension by yourself:
       create extension btree_gin;
 Default is to create \s-1GIN\s0 index, when disabled, a btree index will be created
@@ -1968,7 +1849,7 @@
 Use this directive to set the database handle's 'LongReadLen' attribute to a
 value that will be the larger than the expected size of the LOBs. The default
 is 1MB which may not be enough to extract BLOBs or CLOBs. If the size of the
-\&\s-1LOB\s0 exceeds the 'LongReadLen' DBD::Oracle will return a '\s-1ORA\-24345:\s0 A Truncation'
+\&\s-1LOB\s0 exceeds the 'LongReadLen' DBD::Oracle will return a '\s-1ORA\-24345: A\s0 Truncation'
 error. Default: 1023*1024 bytes.
 .Sp
 Take a look at this page to learn more: http://search.cpan.org/~pythian/DBD\-Oracle\-1.22/Oracle.pm#Data_Interface_for_Persistent_LOBs
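A hypothetical setting raising the limit to roughly 10 MB (the right value depends on your largest LOB):

        LONGREADLEN    10485760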
@@ -1980,14 +1861,14 @@
 \&\s-1DATA_LIMIT\s0 to 500 or lower, otherwise you may experience some out of memory.
 .IP "\s-1LONGTRUNKOK\s0" 4
 .IX Item "LONGTRUNKOK"
-If you want to bypass the '\s-1ORA\-24345:\s0 A Truncation' error, set this directive
+If you want to bypass the '\s-1ORA\-24345: A\s0 Truncation' error, set this directive
 to 1, it will truncate the data extracted to the LongReadLen value. Disable
 by default so that you will be warned if your LongReadLen value is not high
 enough.
 .IP "\s-1NO_LOB_LOCATOR\s0" 4
 .IX Item "NO_LOB_LOCATOR"
 Disable this if you don't want to load full content of \s-1BLOB\s0 and \s-1CLOB\s0 and use
-\&\s-1LOB\s0 locators instead. This is useful to not having to set \s-1LONGREADLEN\s0. Note
+\&\s-1LOB\s0 locators instead. This is usefull to not having to set \s-1LONGREADLEN.\s0 Note
 that this will not improve speed of \s-1BLOB\s0 export as most of the time is always
 consumed by the bytea escaping and in this case data will be processed line
 by line and not by chunk of \s-1DATA_LIMIT\s0 rows. For more information on how it
@@ -1996,7 +1877,7 @@
 .IP "\s-1XML_PRETTY\s0" 4
 .IX Item "XML_PRETTY"
 Force the use \fIgetStringVal()\fR instead of \fIgetClobVal()\fR for \s-1XML\s0 data export. Default is 1,
-enabled for backward compatibility. Set it to 0 to use extract method a la \s-1CLOB\s0.
+enabled for backward compatibility. Set it to 0 to use extract method a la \s-1CLOB.\s0
 .IP "\s-1ENABLE_MICROSECOND\s0" 4
 .IX Item "ENABLE_MICROSECOND"
 Set it to 0 if you want to disable export of milliseconds from Oracle timestamp
@@ -2022,19 +1903,19 @@
 .IP "\s-1MYSQL_PIPES_AS_CONCAT\s0" 4
 .IX Item "MYSQL_PIPES_AS_CONCAT"
 Enable this if double pipe and double ampersand (|| and &&) should not be
-taken as equivalent to \s-1OR\s0 and \s-1AND\s0. It depend of the variable \f(CW@sql_mode\fR,
+taken as equivalent to \s-1OR\s0 and \s-1AND.\s0 It depend of the variable \f(CW@sql_mode\fR,
 Use it only if Ora2Pg fail on auto detecting this behavior.
 .IP "\s-1MYSQL_INTERNAL_EXTRACT_FORMAT\s0" 4
 .IX Item "MYSQL_INTERNAL_EXTRACT_FORMAT"
 Enable this directive if you want \s-1\fIEXTRACT\s0()\fR replacement to use the internal
-format returned as an integer, for example \s-1DD\s0 \s-1HH24:MM:SS\s0 will be replaced
+format returned as an integer, for example \s-1DD HH24:MM:SS\s0 will be replaced
 with the format DDHH24MMSS::bigint; this depends on your application's usage.
 .SS "Special options to handle character encoding"
 .IX Subsection "Special options to handle character encoding"
 .IP "\s-1NLS_LANG\s0 and \s-1NLS_NCHAR\s0" 4
 .IX Item "NLS_LANG and NLS_NCHAR"
-By default Ora2Pg will set \s-1NLS_LANG\s0 to \s-1AMERICAN_AMERICA\s0.AL32UTF8 and \s-1NLS_NCHAR\s0
-to \s-1AL32UTF8\s0. It is not recommanded to change those settings but in some case it
+By default Ora2Pg will set \s-1NLS_LANG\s0 to \s-1AMERICAN_AMERICA.AL32UTF8\s0 and \s-1NLS_NCHAR\s0
+to \s-1AL32UTF8.\s0 It is not recommanded to change those settings but in some case it
 could be useful. Using your own settings with those configuration directive will
 change the client encoding at Oracle side by setting the environment variables
 \&\f(CW$ENV\fR{\s-1NLS_LANG\s0} and \f(CW$ENV\fR{\s-1NLS_NCHAR\s0}.
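If you really do need to override the defaults, a sketch could look like the following (the French locale is purely an example):

        NLS_LANG     FRENCH_FRANCE.AL32UTF8
        NLS_NCHAR    AL32UTF8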
@@ -2067,7 +1948,7 @@
 You can take a look at the PostgreSQL supported character sets here: http://www.postgresql.org/docs/9.0/static/multibyte.html
 .SS "\s-1PLSQL\s0 to \s-1PLPSQL\s0 convertion"
 .IX Subsection "PLSQL to PLPSQL convertion"
-Automatic code conversion from Oracle \s-1PLSQL\s0 to PostgreSQL \s-1PLPGSQL\s0 is a work in
+Automatic code convertion from Oracle \s-1PLSQL\s0 to PostgreSQL \s-1PLPGSQL\s0 is a work in
 progress in Ora2Pg and surely you will always have manual work. The Perl code
 used for automatic conversion is all stored in a specific Perl Module named
 Ora2Pg/PLSQL.pm, feel free to modify/add your own code and send me patches. The
@@ -2075,12 +1956,12 @@
 parameters rewrite.
 .IP "\s-1PLSQL_PGSQL\s0" 4
 .IX Item "PLSQL_PGSQL"
-Enable/disable \s-1PLSQL\s0 to \s-1PLPSQL\s0 conversion. Enabled by default.
+Enable/disable \s-1PLSQL\s0 to \s-1PLPSQL\s0 convertion. Enabled by default.
 .IP "\s-1NULL_EQUAL_EMPTY\s0" 4
 .IX Item "NULL_EQUAL_EMPTY"
 Ora2Pg can replace all conditions with a test on \s-1NULL\s0 by a call to the
 \&\fIcoalesce()\fR function to mimic the Oracle behavior where empty string are
-considered equal to \s-1NULL\s0.
+considered equal to \s-1NULL.\s0
 .Sp
 .Vb 2
 \&        (field1 IS NULL) is replaced by (coalesce(field1::text, \*(Aq\*(Aq) = \*(Aq\*(Aq)
@@ -2108,23 +1989,11 @@
 The replacement will be done in all kind of \s-1DDL\s0 or code that is parsed by
 the \s-1PLSQL\s0 to \s-1PLPGSQL\s0 converter. \s-1PLSQL_PGSQL\s0 must be enabled or \-p used in
 command line.
-.IP "\s-1REWRITE_OUTER_JOIN\s0" 4
-.IX Item "REWRITE_OUTER_JOIN"
-Enable this directive if the rewrite of Oracle native syntax (+) of
-\&\s-1OUTER\s0 \s-1JOIN\s0 is broken. This will force Ora2Pg to not rewrite such code,
-default is to try to rewrite simple form of rigth outer join for the
-moment.
-.IP "\s-1UUID_FUNCTION\s0" 4
-.IX Item "UUID_FUNCTION"
-By default Ora2Pg will convert call to \s-1\fISYS_GUID\s0()\fR Oracle function
-with a call to uuid_generate_v4 from uuid-ossp extension. You can
-redefined it to use the gen_random_uuid function from pgcrypto
-extension by changing the function name. Default to uuid_generate_v4.
 .SS "Materialized view"
 .IX Subsection "Materialized view"
-Since PostgreSQL 9.3, materialized view are supported with the \s-1CREATE\s0 \s-1MATERIALIZED\s0 \s-1VIEW\s0 syntax,
+Since PostgreSQL 9.3, materialized view are supported with the \s-1CREATE MATERIALIZED VIEW\s0 syntax,
 to force Ora2Pg to use the native PostgreSQL support you must enable the configuration
-directive \s-1PG_SUPPORTS_MVIEW\s0.
+directive \s-1PG_SUPPORTS_MVIEW.\s0
 .PP
 Otherwise Ora2Pg will export all materialized views as \*(L"Snapshot Materialized Views\*(R" as explained
 in this document: http://tech.jonathangardner.net/wiki/PostgreSQL/Materialized_Views.
@@ -2154,12 +2023,12 @@
 \&        CREATE VIEW mviewname_mview AS
 \&        SELECT ... FROM ...;
 \&
-\&        SELECT create_materialized_view(\*(Aqmviewname\*(Aq,\*(Aqmviewname_mview\*(Aq, change with the name of the column to used for the index);
+\&        SELECT create_materialized_view(\*(Aqmviewname\*(Aq,\*(Aqmviewname_mview\*(Aq, change with the name of the colum to used for the index);
 .Ve
 .PP
-The first argument is the name of the materialized view, the second the name of the view on which the materialized view is based
+The first argument is the name of the materializd view, the second the name of the view on which the materialized view is based
 and the third is the column name on which the index should be built (i.e., most of the time the primary key). This column is not
-automatically deduced so you need to replace its name.
+automatically deduced so you need to repace its name.
 .PP
 As said above Ora2Pg only supports snapshot materialized views so the table will be entirely refreshed by issuing first a truncate
 of the table and then by load again all data from the view:
@@ -2209,7 +2078,7 @@
 \&        GROUP BY category_id;
 .Ve
 .PP
-Setting \s-1VIEW_AS_TABLE\s0 to product_prices and using export type \s-1TABLE\s0, will
+Setting \s-1VIEW_AS_TABLE\s0 to product_prices and using export type \s-1TABLE,\s0 will
 force Ora2Pg to detect columns returned types and to generate a create table
 statement:
 .PP
@@ -2238,7 +2107,7 @@
 \&        ora2pg \-c ora2pg.conf \-t KETTLE \-j 12 \-a MYTABLE \-o load_mydata.sh
 .Ve
 .PP
-will generate one file called '\s-1HR\s0.MYTABLE.ktr' and add a line to the output
+will generate one file called '\s-1HR.MYTABLE\s0.ktr' and add a line to the output
 file (load_mydata.sh):
 .PP
 .Vb 1
@@ -2351,7 +2220,7 @@
 \&        \-\-estimate_cost
 .Ve
 .PP
-This feature can only be used with the \s-1SHOW_REPORT\s0, \s-1FUNCTION\s0, \s-1PROCEDURE\s0, \s-1PACKAGE\s0
+This feature can only be used with the \s-1SHOW_REPORT, FUNCTION, PROCEDURE, PACKAGE\s0
 and \s-1QUERY\s0 export type.
 .PP
 .Vb 1
@@ -2439,10 +2308,10 @@
 .Ve
 .PP
 This assessment consists of a letter A or B to specify whether the migration needs
-manual rewriting or not. And a number from 1 up to 5 to give you a technical
+manual rewritting or not. And a number from 1 up to 5 to give you a technical
 difficulty level. You have an additional option \-\-human_days_limit to specify
 the number of human-days limit where the migration level should be set to C
-to indicate that it need a huge amount of work and a full project management
+to indicate that it need a huge amount of work and a full project managment
 with migration support. Default is 10 human-days. You can use the configuration
 directive \s-1HUMAN_DAYS_LIMIT\s0 to change this default value permanently.
 .PP
@@ -2482,7 +2351,7 @@
 .Ve
 .PP
 It will generate a \s-1CSV\s0 file with the assessment result, one line per schema or
-database and a detailed \s-1HTML\s0 report for each database scanned.
+database and a detailled \s-1HTML\s0 report for each database scanned.
 .PP
 Hint: Use the \-t | \-\-test option before to test all your connections in your
 \&\s-1CSV\s0 file.
@@ -2498,12 +2367,12 @@
 in the same Perl library lib/Ora2Pg/PLSQL.pm in the hash \f(CW%UNCOVERED_SCORE\fR initialization.
 .PP
 This assessment method is a work in progress so I'm expecting feedback on migration
-experiences to polish the scores/units attributed in those variables.
+experiences to polish the scores/units attribued in those variables.
 .SS "Improving indexes and constraints creation speed"
 .IX Subsection "Improving indexes and constraints creation speed"
 Using the \s-1LOAD\s0 export type and a file containing \s-1SQL\s0 orders to perform, it is
 possible to dispatch those orders over multiple PostgreSQL connections. To be
-able to use this feature, the \s-1PG_DSN\s0, \s-1PG_USER\s0 and \s-1PG_PWD\s0 must be set. Then:
+able to use this feature, the \s-1PG_DSN, PG_USER\s0 and \s-1PG_PWD\s0 must be set. Then:
 .PP
 .Vb 1
 \&        ora2pg \-t LOAD \-c config/ora2pg.conf \-i schema/tables/INDEXES_table.sql \-j 4
@@ -2513,9 +2382,9 @@
 .PP
 This will considerably accelerate this part of the migration process with huge
 data size.
-.SS "Exporting \s-1LONG\s0 \s-1RAW\s0"
+.SS "Exporting \s-1LONG RAW\s0"
 .IX Subsection "Exporting LONG RAW"
-If you still have columns defined as \s-1LONG\s0 \s-1RAW\s0, Ora2Pg will not be able to export
+If you still have columns defined as \s-1LONG RAW,\s0 Ora2Pg will not be able to export
 this kind of data. The \s-1OCI\s0 library fails to export them and always returns the
 same first record. To be able to export the data you need to transform the field
 as \s-1BLOB\s0 by creating a temporary table before migrating data. For example, the
@@ -2532,7 +2401,7 @@
 need to be \*(L"translated\*(R" into a table using \s-1BLOB\s0 as follow:
 .PP
 .Vb 1
-\&        CREATE TABLE test_blob (id NUMBER, c1 BLOB);
+\&        CREATE TABLE test_blob (id NUMNER, c1 BLOB);
 .Ve
 .PP
 And then copy the data with the following \s-1INSERT\s0 query:
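The INSERT itself falls outside this hunk; a sketch of it, assuming the original table is named test_long_raw and using Oracle's TO_LOB() to convert the LONG RAW column, would be:

        INSERT INTO test_blob (id, c1) SELECT id, TO_LOB(c1) FROM test_long_raw;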
@@ -2544,32 +2413,6 @@
 Then you just have to exclude the original table from the export (see \s-1EXCLUDE\s0
 directive) and to rename the new temporary table on the fly using the
 \&\s-1REPLACE_TABLES\s0 configuration directive.
-.SS "Global variables"
-.IX Subsection "Global variables"
-Oracle allow the use of global variables defined in packages. Ora2Pg will
-export these variables for PostgreSQL as user defined custom variables
-available in a session. Oracle variables assignement are exported as
-call to:
-.PP
-.Vb 1
-\&    PERFORM set_config(\*(Aqpkgname.varname\*(Aq, value, false);
-.Ve
-.PP
-Use of these variables in the code is replaced by:
-.PP
-.Vb 1
-\&    current_setting(\*(Aqpkgname.varname\*(Aq)::global_variables_type;
-.Ve
-.PP
-where global_variables_type is the type of the variable extracted from
-the package definition.
-.PP
-If the variable is a constant or have a default value assigned at
-declaration, ora2pg will create a file global_variables.conf with
-the definition to include in the postgresql.conf file so that their
-values will already be set at database connection. Note that the
-value can always modified by the user so you can not have exactly
-a constant.
 .SS "Hints"
 .IX Subsection "Hints"
 Converting your queries with Oracle style outer join (+) syntax to \s-1ANSI\s0 standard \s-1SQL\s0 at
@@ -2580,7 +2423,7 @@
 http://www.thatjeffsmith.com/archive/2012/01/sql\-developer\-data\-modeler\-quick\-tip\-use\-oracle\-join\-syntax\-or\-ansi/
 .PP
 Toad is also able to rewrite the native Oracle \s-1\fIDECODE\s0()\fR syntax into \s-1ANSI\s0
-standard \s-1SQL\s0 \s-1CASE\s0 statement. You can find some slide about this in a
+standard \s-1SQL CASE\s0 statement. You can find some slide about this in a
 presentation given at PgConf.RU: http://ora2pg.darold.net/slides/ora2pg_the_hard_way.pdf
 .SS "Test the migration"
 .IX Subsection "Test the migration"
@@ -2691,7 +2534,7 @@
 your ideas, features request or patches and there will be applied.
 .SH "LICENSE"
 .IX Header "LICENSE"
-Copyright (c) 2000\-2017 Gilles Darold \- All rights reserved.
+Copyright (c) 2000\-2016 Gilles Darold \- All rights reserved.
 .PP
 .Vb 4
 \&        This program is free software: you can redistribute it and/or modify
@@ -2709,4 +2552,4 @@
 .Ve
 .SH "ACKNOWLEDGEMENT"
 .IX Header "ACKNOWLEDGEMENT"
-I must thanks a lot all the great contributors, see changelog for all acknowledgments.
+I must thanks a lot all the great contributors, see changelog for all acknowledgements.
diff -Nru ora2pg-18.0/lib/Ora2Pg/GEOM.pm ora2pg-17.6/lib/Ora2Pg/GEOM.pm
--- ora2pg-18.0/lib/Ora2Pg/GEOM.pm	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/lib/Ora2Pg/GEOM.pm	2016-11-18 05:45:49.000000000 +0800
@@ -4,7 +4,7 @@
 # Name     : Ora2Pg/GEOM.pm
 # Language : Perl
 # Authors  : Gilles Darold, gilles _AT_ darold _DOT_ net
-# Copyright: Copyright (c) 2000-2017 : Gilles Darold - All rights reserved -
+# Copyright: Copyright (c) 2000-2016 : Gilles Darold - All rights reserved -
 # Function : Perl module used to convert Oracle SDO_GEOMETRY into PostGis
 # Usage    : See documentation
 #------------------------------------------------------------------------------
@@ -40,7 +40,7 @@
 
 use strict;
 
-$VERSION = '18.0';
+$VERSION = '17.6';
 
 # SDO_ETYPE
 # Second element of triplet in SDO_ELEM_INFO
diff -Nru ora2pg-18.0/lib/Ora2Pg/MySQL.pm ora2pg-17.6/lib/Ora2Pg/MySQL.pm
--- ora2pg-18.0/lib/Ora2Pg/MySQL.pm	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/lib/Ora2Pg/MySQL.pm	2016-11-18 05:45:49.000000000 +0800
@@ -9,7 +9,7 @@
 setlocale(LC_NUMERIC,"C");
 
 
-$VERSION = '18.0';
+$VERSION = '17.6';
 
 # These definitions can be overriden from configuration file
 our %MYSQL_TYPE = (
@@ -82,25 +82,6 @@
 	$sth;
 }
 
-sub _table_exists
-{
-	my ($self, $schema, $table) = @_;
-
-	my $ret = '';
-
-	my $sql = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_SCHEMA = '$schema' AND TABLE_NAME = '$table'";
-
-	my $sth = $self->{dbh}->prepare( $sql ) or return undef;
-	$sth->execute or return undef;
-	while ( my @row = $sth->fetchrow()) {
-		$ret = $row[0];
-	}
-	$sth->finish();
-
-	return $ret;
-}
-
-
 
 =head2 _get_encoding
 
diff -Nru ora2pg-18.0/lib/Ora2Pg/PLSQL.pm ora2pg-17.6/lib/Ora2Pg/PLSQL.pm
--- ora2pg-18.0/lib/Ora2Pg/PLSQL.pm	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/lib/Ora2Pg/PLSQL.pm	2016-11-18 05:45:49.000000000 +0800
@@ -4,7 +4,7 @@
 # Name     : Ora2Pg/PLSQL.pm
 # Language : Perl
 # Authors  : Gilles Darold, gilles _AT_ darold _DOT_ net
-# Copyright: Copyright (c) 2000-2017 : Gilles Darold - All rights reserved -
+# Copyright: Copyright (c) 2000-2016 : Gilles Darold - All rights reserved -
 # Function : Perl module used to convert Oracle PLSQL code into PL/PGSQL
 # Usage    : See documentation
 #------------------------------------------------------------------------------
@@ -31,7 +31,7 @@
 setlocale(LC_NUMERIC,"C");
 
 
-$VERSION = '18.0';
+$VERSION = '17.6';
 
 #----------------------------------------------------
 # Cost scores used when converting PLSQL to PLPGSQL
@@ -60,7 +60,7 @@
 	'DIMENSION' => 0, # Not supported and no equivalent
 	'JOB' => 2, # read/adapt
 	'SYNONYM' => 0.1, # read/adapt
-	'QUERY' => 0.2, # read/adapt
+	'QUERY' => 0.02, # read/adapt
 );
 
 # Scores following the number of characters: 1000 chars for one unit.
@@ -77,7 +77,7 @@
 	'TRUNC' => 0.1,
 	'DECODE' => 1,
 	'IS TABLE OF' => 4,
-	'OUTER JOIN' => 2,
+	'OUTER JOIN' => 1,
 	'CONNECT BY' => 4,
 	'BULK COLLECT' => 3,
 	'GOTO' => 2,
@@ -99,7 +99,7 @@
 	'TO_NUMBER' => 0.1,
 	'REGEXP_LIKE' => 0.1,
 	'TG_OP' => 1,
-	'CURSOR' => 0.2,
+	'CURSOR' => 1,
 	'PIPE ROW' => 1,
 	'ORA_ROWSCN' => 3,
 	'SAVEPOINT' => 1,
@@ -111,7 +111,7 @@
 	'PLVLEX' => 2,
 	'PLUNIT' => 2,
 	'ADD_MONTHS' => 0.1,
-	'LAST_DAY' => 1,
+	'LAST_DATE' => 1,
 	'NEXT_DAY' => 1,
 	'MONTHS_BETWEEN' => 1,
 	'NVL2' => 1,
@@ -120,12 +120,6 @@
 	'MDSYS' => 1,
 	'MERGE INTO' => 3,
 	'COMMIT' => 3,
-	'CONTAINS' => 1,
-	'SCORE' => 1,
-	'FUZZY' => 1,
-	'NEAR' => 1,
-	'TO_CHAR' => 0.1,
-	'ANYDATA' => 2,
 );
 
 @ORA_FUNCTIONS = qw(
@@ -300,68 +294,6 @@
 	configuration option to 1.
 =cut
 
-=head2 convert_plsql_code
-
-Main function used to convert Oracle SQL and PL/SQL code into PostgreSQL
-compatible code
-
-=cut
-
-sub convert_plsql_code
-{
-        my ($class, $str) = @_;
-
-	%{$class->{single_fct_call}} = ();
-
-	# Extract all block from the code by splitting it on the semi-comma
-	# character and replace all necessary function call
-	my @code_parts = split(/;/, $str);
-	for (my $i = 0; $i <= $#code_parts; $i++) {
-		next if (!$code_parts[$i]);
-		%{$class->{single_fct_call}} = ();
-		$code_parts[$i] = extract_function_code($class, $code_parts[$i], 0);
-		foreach my $k (keys %{$class->{single_fct_call}}) {
-			$class->{single_fct_call}{$k} = replace_oracle_function($class, $class->{single_fct_call}{$k});
-		};
-		while ($code_parts[$i] =~ s/\%\%REPLACEFCT(\d+)\%\%/$class->{single_fct_call}{$1}/) {};
-	}
-	$str = join(';', @code_parts);
-	$str =~ s/[;]+/;/gs;
-
-	# Apply code rewrite on other part of the code
-	$str = plsql_to_plpgsql($class, $str);
-
-	return $str;
-
-}
-
-=head2 extract_function_code
-
-Recursive function used to extract call to function in Oracle SQL
-and PL/SQL code
-
-=cut
-
-sub extract_function_code
-{
-        my ($class, $code, $idx) = @_;
-
-        # Look for a function call that do not have an other function
-        # call inside, replace content with a marker and store the
-        # replaced string into a hask to rewritten later to convert pl/sql
-        if ($code =~ s/\b([a-zA-Z\.\_]+)\s*\(([^\(\)]*)\)/\%\%REPLACEFCT$idx\%\%/s) {
-		my $fct_name = $1;
-		my $fct_code = $2;
-		my $space = '';
-		$space = ' ' if (grep (/^$fct_name$/i, 'FROM', 'AS', 'VALUES', 'DEFAULT', 'OR', 'AND', 'IN'));
-                # recursively replace function
-                $class->{single_fct_call}{$idx} = $fct_name . $space . '(' . $fct_code . ')';
-                $code = extract_function_code($class, $code, ++$idx);
-        }
-
-        return $code;
-}
-
 =head2 plsql_to_plpgsql
 
 This function returns PLSQL code translated to PLPGSQL code
@@ -374,6 +306,7 @@
 
 	return mysql_to_plpgsql($class, $str) if ($class->{is_mysql});
 
+	my @xmlelt = ();
 	my $field = '\s*([^\(\),]+)\s*';
 	my $num_field = '\s*([\d\.]+)\s*';
 	my $date_field = '\s*([^,\)\(]*(?:date|time)[^,\)\(]*)\s*';
@@ -382,6 +315,8 @@
 	# PL/SQL to PL/PGSQL code conversion
 	# Feel free to add your contribution here.
 	#--------------------------------------------
+	# Change NVL to COALESCE
+	$str =~ s/NVL\s*\(/coalesce(/igs;
 	my $conv_current_time = 'clock_timestamp()';
 	if (!grep(/$class->{type}/i, 'FUNCTION', 'PROCEDURE', 'PACKAGE')) {
 		$conv_current_time = 'LOCALTIMESTAMP';
@@ -396,7 +331,6 @@
 	$str =~ s/SYSTIMESTAMP/CURRENT_TIMESTAMP/igs;
 	# remove FROM DUAL
 	$str =~ s/FROM DUAL//igs;
-	$str =~ s/FROM SYS\.DUAL//igs;
 
 	# There's no such things in PostgreSQL
 	$str =~ s/PRAGMA RESTRICT_REFERENCES[^;]+;//igs;
@@ -416,8 +350,8 @@
 	$str =~ s/EXEC(\s+)/SELECT$1/igs;
 
 	# Remove leading : on Oracle variable
-	$str =~ s/([^\w:]+):(\d+)/$1\$$2/igs;
-	$str =~ s/([^\w:]+):(\w+)/$1$2/igs;
+	$str =~ s/([^\w]+):(\d+)/$1\$$2/igs;
+	$str =~ s/([^\w]+):(\w+)/$1$2/igs;
 
 	# INSERTING|DELETING|UPDATING -> TG_OP = 'INSERT'|'DELETE'|'UPDATE'
 	$str =~ s/\bINSERTING\b/TG_OP = 'INSERT'/igs;
@@ -429,17 +363,10 @@
 
 	# SELECT without INTO should be PERFORM. Exclude select of view when prefixed with AS ot IS
 	if ( ($class->{type} ne 'QUERY') && ($class->{type} ne 'VIEW') ) {
-		my @text_values = ();
-		my $j = 0;
-		while ($str =~ s/'([^']*)'/\%TEXTVALUE-$j\%/s) {
-			push(@text_values, $1);
-			$j++;
-		}
 		$str =~ s/(\s+)(?<!AS|IS)(\s+)SELECT((?![^;]+\bINTO\b)[^;]+;)/$1$2PERFORM$3/isg;
 		$str =~ s/\bSELECT\b((?![^;]+\bINTO\b)[^;]+;)/PERFORM$1/isg;
 		$str =~ s/(AS|IS|FOR|UNION ALL|UNION|MINUS|\()(\s*)(ORA2PG_COMMENT\d+\%)?(\s*)PERFORM/$1$2$3$4SELECT/isg;
 		$str =~ s/(INSERT\s+INTO\s+[^;]+\s+)PERFORM/$1SELECT/isg;
-		$str =~ s/\%TEXTVALUE-(\d+)\%/'$text_values[$1]'/gs;
 	}
 
 	# Change nextval on sequence
@@ -451,12 +378,18 @@
 	$str =~ s/\bMINUS\b/EXCEPT/igs;
 	# Comment DBMS_OUTPUT.ENABLE calls
 	$str =~ s/(DBMS_OUTPUT.ENABLE[^;]+;)/-- $1/isg;
+	# Raise information to the client
+	$str =~ s/DBMS_OUTPUT\.(put_line|put|new_line)\s*\((.*?)\);/&raise_output($2)/igse;
+
+	# Substitution to replace type of sql variable in PLSQL code
+#	foreach my $t (keys %Ora2Pg::TYPE) {
+#		$str =~ s/\b$t\b/$Ora2Pg::TYPE{$t}/igs;
+#	}
 
 	# Procedure are the same as function in PG
 	$str =~ s/\bPROCEDURE\b/FUNCTION/igs;
 	# Simply remove this as not supported
 	$str =~ s/\bDEFAULT\s+NULL\b//igs;
-
 	# Replace DEFAULT empty_blob() and empty_clob()
 	$str =~ s/(empty_blob|empty_clob)\(\s*\)//igs;
 	$str =~ s/(empty_blob|empty_clob)\b//igs;
@@ -486,39 +419,81 @@
 	}
 
 	# Replace CURSOR IS SELECT by CURSOR FOR SELECT
-	$str =~ s/\bCURSOR(\s+)IS(\s+)SELECT/CURSOR$1FOR$2SELECT/isg;
+	$str =~ s/\bCURSOR(\s*)IS(\s*)SELECT/CURSOR$1FOR$2SELECT/isg;
 	# Replace CURSOR (param) IS SELECT by CURSOR FOR SELECT
 	$str =~ s/\bCURSOR(\s*\([^\)]+\)\s*)IS(\s*)SELECT/CURSOR$1FOR$2SELECT/isg;
-	# Replace OPEN cursor FOR with dynamic query
-	$str =~ s/(OPEN\s+(?:.*?)\s+FOR)((?:.*?)USING)/$1 EXECUTE$2/isg;
-	$str =~ s/(OPEN\s+(?:.*?)\s+FOR)\s+((?!EXECUTE)(?:.*?)\|\|)/$1 EXECUTE $2/isg;
-	$str =~ s/(OPEN\s+(?:.*?)\s+FOR)\s+([^\s]+\s*;)/$1 EXECUTE $2/isg;
+
+	# Rewrite TO_DATE formating call
+	$str =~ s/TO_DATE\s*\(\s*('[^\']+'),\s*('[^\']+')[^\)]*\)/to_date($1,$2)/igs;
 
 	# Normalize HAVING ... GROUP BY into GROUP BY ... HAVING clause	
 	$str =~ s/\bHAVING\b(.*?)\bGROUP BY\b(.*?)((?=UNION|ORDER BY|LIMIT|INTO |FOR UPDATE|PROCEDURE)|$)/GROUP BY$2 HAVING$1/gis;
 
+	# Cast round() call as numeric => Remove because most of the time this may not be necessary
+	#$str =~ s/round\s*\((.*?),([\s\d]+)\)/round\($1::numeric,$2\)/igs;
+
+	# Change trunc() to date_trunc('day', field)
+	# Trunc is replaced with date_trunc if we find date in the name of
+	# the value because Oracle have the same trunc function on number
+	# and date type
+	my %date_trunc = ();
+	my $di = 0;
+	while ($str =~ s/\bTRUNC\($date_field\)/\%\%DATETRUNC$di\%\%/is) {
+		push(@date_trunc, "date_trunc('day', $1)");
+		$di++;
+	}
+	while ($str =~ s/\bTRUNC\($date_field,$field\)/\%\%DATETRUNC$di\%\%/is) {
+		push(@date_trunc, "date_trunc($2, $1)");
+		$di++;
+	}
+
+	# Convert the call to the Oracle function add_months() into Pg syntax
+	$str =~ s/ADD_MONTHS\s*\(\s*TO_CHAR\(\s*([^,]+)(.*?),\s*(\d+)\s*\)/$1 + '$3 month'::interval/gsi;
+	$str =~ s/ADD_MONTHS\s*\(\s*TO_CHAR\(\s*([^,]+)(.*?),\s*([^,\(\)]+)\s*\)/$1 + $3*'1 month'::interval/gsi;
+	$str =~ s/ADD_MONTHS\s*\((.*?),\s*(\d+)\s*\)/$1 + '$2 month'::interval/gsi;
+	$str =~ s/ADD_MONTHS\s*\((.*?),\s*([^,\(\)]+)\s*\)/$1 + $2*'1 month'::interval/gsi;
+
+	# Convert the call to the Oracle function add_years() into Pg syntax
+	$str =~ s/ADD_YEARS\s*\(\s*TO_CHAR\(\s*([^,]+)(.*?),\s*(\d+)\s*\)/$1 + '$3 year'::interval/gsi;
+	$str =~ s/ADD_YEARS\s*\(\s*TO_CHAR\(\s*([^,]+)(.*?),\s*([^,\(\)]+)\s*\)/$1 + $3*'1 year'::interval/gsi;
+	$str =~ s/ADD_YEARS\s*\((.*?),\s*(\d+)\s*\)/$1 + '$2 year'::interval/gsi;
+	$str =~ s/ADD_YEARS\s*\((.*?),\s*([^,\(\)]+)\s*\)/$1 + $2*' year'::interval/gsi;
+
+	# Restore DATETRUNC call
+	$str =~ s/\%\%DATETRUNC(\d+)\%\%/$date_trunc[$1]/igs;
+	@date_trunc = ();
+	$str =~ s/date_trunc\('MM'/date_trunc('month'/igs;
+
 	# Add STRICT keyword when select...into and an exception with NO_DATA_FOUND/TOO_MANY_ROW is present
-	$str =~ s/\b(SELECT\b[^;]*?INTO)(.*?)(EXCEPTION.*?(?:NO_DATA_FOUND|TOO_MANY_ROW))/$1 STRICT $2 $3/igs;
+	if ($str !~ s/\b(SELECT\b[^;]*?INTO)(.*?)(EXCEPTION.*?NO_DATA_FOUND)/$1 STRICT $2 $3/igs) {
+		$str =~ s/\b(SELECT\b[^;]*?INTO)(.*?)(EXCEPTION.*?TOO_MANY_ROW)/$1 STRICT $2 $3/igs;
+	}
 
 	# Remove the function name repetion at end
-	$str =~ s/\bEND\s+(?!IF|LOOP|CASE|INTO|FROM|END|,)[a-z0-9_"]+(\s*[;]?)/END$1$2/igs;
+	$str =~ s/END\s+(?!IF|LOOP|CASE|INTO|FROM|,)[a-z0-9_"]+\s*([;]*)\s*$/END$1/igs;
 
-	####
 	# Replace ending ROWNUM with LIMIT
-	####
-	# Catch potential subquery first
-	my %subqueries = ();
-	my $j = 0;
-	($str, %subqueries) = extract_subqueries($str, \$j);
-	$str = replace_rownum_with_limit($str);
-	$str =~ s/\%SUBQUERY(\d+)\%/\($subqueries{$1}\)/gs;
+        $str =~ s/(WHERE|AND)\s*ROWNUM\s*=\s*(\d+)/'LIMIT 1 OFFSET ' . ($2-1)/iges;
+        $str =~ s/(WHERE|AND)\s*ROWNUM\s*<=\s*(\d+)/LIMIT $2/igs;
+        $str =~ s/(WHERE|AND)\s*ROWNUM\s*>=\s*(\d+)/'LIMIT ALL OFFSET ' . ($2-1)/iges;
+        $str =~ s/(WHERE|AND)\s*ROWNUM\s*<\s*(\d+)/'LIMIT ' . ($2-1)/iges;
+        $str =~ s/(WHERE|AND)\s*ROWNUM\s*>\s*(\d+)/LIMIT ALL OFFSET $2/igs;
 
 	# Rewrite comment in CASE between WHEN and THEN
 	$str =~ s/(\s*)(WHEN\s+[^\s]+\s*)(ORA2PG_COMMENT\d+\%)(\s*THEN)/$1$3$1$2$4/igs;
 
+	# Replace INSTR by POSITION
+	$str =~ s/INSTR\s*\(\s*([^,]+),\s*('[^']+')\s*\)/POSITION($2 in $1)/igs;
+
 	# Replace SQLCODE by SQLSTATE
 	$str =~ s/\bSQLCODE\b/SQLSTATE/igs;
 
+	# Replace some way of extracting date part of a date
+	$str =~ s/TO_NUMBER\s*\(\s*TO_CHAR\s*\(([^,]+),\s*('[^']+')\s*\)\s*\)/to_char($1, $2)::integer/igs;
+
+	# Replace the UTC convertion with the PG syntaxe
+	$str =~ s/SYS_EXTRACT_UTC\s*\(([^\)]+)\)/($1 AT TIME ZONE 'UTC')/isg;
+
 	# Revert order in FOR IN REVERSE
 	$str =~ s/FOR(.*?)IN\s+REVERSE\s+([^\.\s]+)\s*\.\.\s*([^\s]+)/FOR$1IN REVERSE $3..$2/isg;
 
@@ -550,10 +525,47 @@
 	$str =~ s/([\-]*)BINARY_(FLOAT|DOUBLE)_INFINITY/'$1Infinity'/igs;
 	$str =~ s/'([\-]*)Inf'/'$1Infinity'/igs;
 
+	# REGEX_LIKE( string, pattern ) => string ~ pattern
+	$str =~ s/REGEXP_LIKE\s*\(\s*([^,]+)\s*,\s*('[^\']+')\s*\)/$1 \~ $2/igs;
+
+	# Remove call to XMLCDATA, there's no such function with PostgreSQL
+	$str =~ s/XMLCDATA\s*\(([^\)]+)\)/'<![CDATA[' || $1 || ']]>'/igs;
+	# Remove call to getClobVal() or getStringVal, no need of that
+	$str =~ s/\.(getClobVal|getStringVal)\(\s*\)//igs;
+	# Add the name keyword to XMLELEMENT
+	$str =~ s/XMLELEMENT\s*\(\s*/XMLELEMENT(name /igs;
+
+	# Store XML element into memory and replace it by a placeholder to be
+	# able to use it into function call and not break decode replacement
+	my $i = 0;
+	while ($str =~ s/(XMLELEMENT\s*\([^\)]+\))/%%XMLELEMENT$i%%/is) {
+		my $tmpstr = replace_decode($1);
+		push(@xmlelt, $tmpstr);
+		$i++;
+	}
+
 	# Replace PIPE ROW by RETURN NEXT
 	$str =~ s/PIPE\s+ROW\s*/RETURN NEXT /igs;
 	$str =~ s/(RETURN NEXT )\(([^\)]+)\)/$1$2/igs;
 
+	# The to_number() function reclaim a second argument under postgres which is the format.
+	# By default we use '99999999999999999999D99999999999999999999' that may allow bigint
+	# and double precision number. Feel free to modify it
+	$str =~ s/TO_NUMBER\s*\(([^,\(\)]+)\s*\)/to_number\($1,'99999999999999999999D99999999999999999999'\)/igs;
+
+	# Replace sys_context() calls with the PostgreSQL equivalent
+	$str = &replace_sys_context($str);
+
+	# Replace SDO_GEOM functions with the PostGIS equivalent
+	$str = &replace_sdo_function($str);
+
+	# Replace spatial operators with the PostGIS equivalent
+	$str = &replace_sdo_operator($str);
+
+	# Replace decode("user_status",'active',"username",null) with the
+	# PostgreSQL equivalent (CASE WHEN "user_status"='active' THEN "username" ELSE NULL END)
+	$str = replace_decode($str);
+
 	#  Convert all x <> NULL or x != NULL clauses to x IS NOT NULL.
 	$str =~ s/\s*(<>|\!=)\s*NULL/ IS NOT NULL/igs;
 	#  Convert all x = NULL clauses to x IS NULL.
@@ -567,157 +579,24 @@
 		$str =~ s/([a-z0-9_\."]+)\s*IS NULL/coalesce($1::text, '') = ''/igs;
 		$str =~ s/([a-z0-9_\."]+)\s*IS NOT NULL/($1 IS NOT NULL AND $1::text <> '')/igs;
 		# Form: fct(expression) IS NULL
-		$str =~ s/([a-z0-9_\."]+\s*\([^\)\(]*\))\s*IS NULL/coalesce($1::text, '') = ''/igs;
-		$str =~ s/([a-z0-9_\."]+\s*\([^\)\(]*\))\s*IS NOT NULL/($1 IS NOT NULL AND ($1)::text <> '')/igs;
+		$str =~ s/([a-z0-9_\."]+\s*\([^\)]*\))\s*IS NULL/coalesce($1::text, '') = ''/igs;
+		$str =~ s/([a-z0-9_\."]+\s*\([^\)]*\))\s*IS NOT NULL/($1 IS NOT NULL AND ($1)::text <> '')/igs;
 	}
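 	# With the rules above, an expression like "status IS NULL" is rewritten as
 	# "coalesce(status::text, '') = ''" to mimic Oracle's treatment of empty
 	# strings as NULL (the column name is illustrative).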
 
-	# Replace type in sub block
-	$str =~ s/(DECLARE\s+)(.*?)(\s+BEGIN)/$1 . &replace_sql_type($2, $class->{pg_numeric_type}, $class->{default_numeric}, $class->{pg_integer_type}) . $3/iges;
+	# Rewrite two-argument replace(a,b) as three-argument replace(a, b, '')
+	$str =~ s/REPLACE\s*\($field,$field\)/replace\($1, $2, ''\)/igs;
+
+	# Replace Oracle substr(string, start_position, length) with
+	# PostgreSQL substring(string from start_position for length)
+	$str =~ s/substr\s*\($field,$field,$field\)/substring($1 from $2 for $3)/igs;
+	$str =~ s/substr\s*\($field,$field\)/substring($1 from $2)/igs;
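+	# For example, substr(ename, 2, 3) becomes substring(ename from 2 for 3).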
 
 	# Remove any call to MDSYS schema in the code
 	$str =~ s/MDSYS\.//igs;
 
-	if ($class->{rewrite_outer_join}) {
-		# Replace call to right outer join obsolete syntax
-		$str = replace_right_outer_join($str);
-
-		# Replace call to left outer join obsolete syntax
-		$str = replace_left_outer_join($str);
-	}
-
-	return $str;
-}
-
-sub extract_subqueries
-{
-	my ($query, $pos) = @_;
-
-	my %queries = ();
-	my $out_query = '';
-	my $idx = 0;
-	my $sub_query = '';
-	foreach my $c (split(//, $query)) {
-		$idx++ if ($c eq '(');
-		$idx-- if ($c eq ')');
-		if ($idx > 0) {
-			$sub_query .= $c;
-		} elsif ($sub_query && $c eq ')') {
-			$sub_query =~ s/^\(//;
-			$queries{$$pos} = replace_rownum_with_limit($sub_query);
-			$out_query .= "\%SUBQUERY$$pos\%";
-			$sub_query = '';
-			$$pos++;
-		} else {
-			$out_query .= $c;
-		}
-	}
-	return $out_query, %queries;
-}
-
-sub replace_rownum_with_limit
-{
-	my $str = shift;
-
-        $str =~ s/\s+(WHERE|AND)\s+ROWNUM\s*=\s*(\d+)([^;]*)/' ' . $1 . $3 . ' LIMIT 1 OFFSET ' . ($2-1)/iges;
-        $str =~ s/\s+(WHERE|AND)\s+ROWNUM\s*<=\s*(\d+)([^;]*)/ $1 $3 LIMIT $2/igs;
-        $str =~ s/\s+(WHERE|AND)\s+ROWNUM\s*>=\s*(\d+)([^;]*)/' ' . $1 . ' ' . $3 . ' LIMIT ALL OFFSET ' . ($2-1)/iges;
-        $str =~ s/\s+(WHERE|AND)\s+ROWNUM\s*<\s*(\d+)([^;]*)/' ' . $1 . ' ' . $3 . ' LIMIT ' . ($2-1)/iges;
-        $str =~ s/\s+(WHERE|AND)\s+ROWNUM\s*>\s*(\d+)([^;]*)/ $1 $3 LIMIT ALL OFFSET $2/igs;
-
-	$str =~ s/(where)\s+and/$1/igs;
-	$str =~ s/\s+(?:where|and)(\s+LIMIT\s+)/$1/igs;
-
-	return $str;
-}
-
-sub replace_oracle_function
-{
-        my ($class, $str) = @_;
-
-	my @xmlelt = ();
-	my $field = '\s*([^\(\),]+)\s*';
-	my $num_field = '\s*([\d\.]+)\s*';
-	my $date_field = '\s*([^,\)\(]*(?:date|time)[^,\)\(]*)\s*';
-
-	#--------------------------------------------
-	# PL/SQL to PL/PGSQL code conversion
-	# Feel free to add your contribution here.
-	#--------------------------------------------
-	# Change NVL to COALESCE
-	$str =~ s/NVL\s*\(/coalesce(/is;
-
-	# Raise information to the client
-	$str =~ s/DBMS_OUTPUT\.(put_line|put|new_line)\s*\((.*)\)/&raise_output($2)/ise;
-
-	# Replace DEFAULT empty_blob() and empty_clob()
-	$str =~ s/(empty_blob|empty_clob)\s*\(\s*\)//is;
-	$str =~ s/(empty_blob|empty_clob)\b//is;
-
-	# Replace call to SYS_GUID() function
-	$str =~ s/\bSYS_GUID\s*\(\s*\)/$class->{uuid_function}()/is;
-	$str =~ s/\bSYS_GUID\b/$class->{uuid_function}()/is;
-
-	# Rewrite TO_DATE formatting call
-	$str =~ s/TO_DATE\s*\(\s*('[^\']+'),\s*('[^\']+')[^\)]*\)/to_date($1,$2)/is;
-
-	# Replace calls to trim() with btrim()
-	$str =~ s/\bTRIM\s*\(([^\(\)]+)\)/btrim($1)/is;
-
-	# Change trunc() to date_trunc('day', field)
-	# Trunc is replaced with date_trunc if we find date in the name of
-	# the value, because Oracle has the same trunc function for number
-	# and date types
-	$str =~ s/\bTRUNC\s*\($date_field\)/date_trunc('day', $1)/is;
-	$str =~ s/\bTRUNC\s*\($date_field,$field\)/date_trunc($2, $1)/is;
-	$str =~ s/date_trunc\('MM'/date_trunc('month'/is;
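-	# For example, TRUNC(start_date) becomes date_trunc('day', start_date);
-	# plain numeric TRUNC() calls are left untouched.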
-
-	# Convert the call to the Oracle function add_months() into Pg syntax
-	$str =~ s/ADD_MONTHS\s*\(([^,]+),\s*(\d+)\s*\)/$1 + '$2 month'::interval/si;
-	$str =~ s/ADD_MONTHS\s*\(([^,]+),\s*([^,\(\)]+)\s*\)/$1 + $2*'1 month'::interval/si;
-
-	# Convert the call to the Oracle function add_years() into Pg syntax
-	$str =~ s/ADD_YEARS\s*\(([^,]+),\s*(\d+)\s*\)/$1 + '$2 year'::interval/si;
-	$str =~ s/ADD_YEARS\s*\(([^,]+),\s*([^,\(\)]+)\s*\)/$1 + $2*'1 year'::interval/si;
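-	# For example, ADD_MONTHS(hire_date, 3) becomes hire_date + '3 month'::interval,
-	# while a non-literal count is multiplied, e.g. hire_date + n*'1 month'::interval.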
-
-	# Replace INSTR by POSITION
-	$str =~ s/\bINSTR\s*\(\s*([^,]+),\s*('[^']+')\s*\)/POSITION($2 in $1)/is;
-
-	# The to_number() function requires a second argument in PostgreSQL, the format.
-	# By default we use '99999999999999999999D99999999999999999999', which should allow bigint
-	# and double precision numbers. Feel free to modify it.
-	#$str =~ s/TO_NUMBER\s*\(([^,\)]+)\)/to_number\($1,'99999999999999999999D99999999999999999999'\)/is;
-
-	# Replace to_number with a cast
-	$str =~ s/TO_NUMBER\s*\(\s*([^\)]+)\s*\)/($1)\:\:integer/is;
-
-	# Replace the UTC conversion with the PG syntax
-	$str =~ s/SYS_EXTRACT_UTC\s*\(([^\)]+)\)/($1 AT TIME ZONE 'UTC')/is;
-
-	# REGEXP_LIKE( string, pattern ) => string ~ pattern
-	$str =~ s/REGEXP_LIKE\s*\(\s*([^,]+)\s*,\s*('[^\']+')\s*\)/$1 \~ $2/is;
-
-	# Rewrite XMLCDATA calls as literal CDATA markup, there is no such function in PostgreSQL
-	$str =~ s/XMLCDATA\s*\(([^\)]+)\)/'<![CDATA[' || $1 || ']]>'/is;
-	# Remove calls to getClobVal() or getStringVal(), they are not needed
-	$str =~ s/\.(getClobVal|getStringVal)\s*\(\s*\)//is;
-	# Add the name keyword to XMLELEMENT
-	$str =~ s/XMLELEMENT\s*\(\s*/XMLELEMENT(name /is;
-
-	# Cast round() call as numeric
-	$str =~ s/round\s*\(([^,]+),([\s\d]+)\)/round\(($1)::numeric,$2\)/igs;
-
-	# Replace SDO_GEOM functions with the PostGIS equivalent
-	$str = &replace_sdo_function($str);
-
-	# Replace spatial operators with the PostGIS equivalent
-	$str = &replace_sdo_operator($str);
-
-	# Rewrite two-argument replace(a,b) as three-argument replace(a, b, '')
-	$str =~ s/REPLACE\s*\($field,$field\)/replace($1, $2, '')/is;
-
-	# Replace decode("user_status",'active',"username",null) with the
-	# PostgreSQL equivalent (CASE WHEN "user_status"='active' THEN "username" ELSE NULL END)
-	$str = replace_decode($str);
+	# Restore XMLELEMENT call
+	$str =~ s/\%\%XMLELEMENT(\d+)\%\%/$xmlelt[$1]/igs;
+	@xmlelt = ();
 
 	##############
 	# Replace package.function call by package_function
@@ -725,7 +604,7 @@
 	if (scalar keys %{$class->{package_functions}}) {
 		my @text_values = ();
 		my $j = 0;
-		while ($str =~ s/'([^']*)'/\%TEXTVALUE-$j\%/s) {
+		while ($str =~ s/'([^']+)'/\%TEXTVALUE-$j\%/s) {
 			push(@text_values, $1);
 			$j++;
 		}
@@ -735,39 +614,46 @@
 		$str =~ s/\%TEXTVALUE-(\d+)\%/'$text_values[$1]'/gs;
 	}
 
-	# Replace some sys_context() calls with the PostgreSQL equivalent
-	replace_sys_context($str);
+	# Replace calls to trim() with btrim()
+	$str =~ s/\bTRIM\(([^\(\)]+)\)/btrim($1)/igs;
 
 	return $str;
 }
 
-
 sub replace_decode
 {
 	my $str = shift;
 
 	my $decode_idx = 0;
 	my @str_decode = ();
-
-	if ($str =~ s/DECODE\s*\(([^\(\)]+)\)/DECODE\%\%/is) {
+	while ($str =~ s/DECODE\s*\(([^\(\)]+)\)/DECODE%$decode_idx%/is) {
 		push(@str_decode, $1);
-		# Create an array with all parameters of the decode function
-		my @fields = split(/\s*,\s*/s, $str_decode[-1]);
-		my $case_str = 'CASE ';
-		for (my $i = 1; $i <= $#fields; $i+=2) {
-			if ($i < $#fields) {
-				$case_str .= "WHEN $fields[0]=$fields[$i] THEN $fields[$i+1] ";
-			} else {
-				$case_str .= " ELSE $fields[$i] ";
+		# When there is no potential subquery in the decode statement
+		if ($str_decode[-1] !~ /(\(|\))/) {
+			# Create an array with all parameters of the decode function
+			my @fields = split(/\s*,\s*/s, $str_decode[-1]);
+			my $case_str = 'CASE ';
+			for (my $i = 1; $i <= $#fields; $i+=2) {
+				if ($i < $#fields) {
+					$case_str .= "WHEN $fields[0]=$fields[$i] THEN $fields[$i+1] ";
+				} else {
+					$case_str .= " ELSE $fields[$i] ";
+				}
 			}
+			$case_str .= 'END';
+			$str_decode[-1] = $case_str;
 		}
-		$case_str .= 'END';
-		$str =~ s/DECODE\%\%/$case_str/s;
+		$decode_idx++;
+	}
+	while ($str =~ /DECODE%(\d+)%/) {
+		$decode_idx = $1;
+		$str =~ s/DECODE%$decode_idx%/$str_decode[$decode_idx]/s;
 	}
 
 	return $str;
 }
 
+
 # Function to replace calls to SYS_CONTEXT('USERENV', ...)
 # List of Oracle environment variables: http://docs.oracle.com/cd/B28359_01/server.111/b28286/functions172.htm
 # Possibly corresponding PostgreSQL variables: http://www.postgresql.org/docs/current/static/functions-info.html
@@ -775,14 +661,14 @@
 {
 	my $str = shift;
 
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(OS_USER|SESSION_USER|AUTHENTICATED_IDENTITY)'\s*\)/session_user/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'BG_JOB_ID'\s*\)/pg_backend_pid()/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(CLIENT_IDENTIFIER|PROXY_USER)'\s*\)/session_user/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'CURRENT_SCHEMA'\s*\)/current_schema/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'CURRENT_USER'\s*\)/current_user/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(DB_NAME|DB_UNIQUE_NAME)'\s*\)/current_database/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(HOST|IP_ADDRESS)'\s*\)/inet_client_addr()/is;
-	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'SERVER_HOST'\s*\)/inet_server_addr()/is;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(OS_USER|SESSION_USER|AUTHENTICATED_IDENTITY)'\s*\)/session_user/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'BG_JOB_ID'\s*\)/pg_backend_pid()/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(CLIENT_IDENTIFIER|PROXY_USER)'\s*\)/session_user/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'CURRENT_SCHEMA'\s*\)/current_schema/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'CURRENT_USER'\s*\)/current_user/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(DB_NAME|DB_UNIQUE_NAME)'\s*\)/current_database/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'(HOST|IP_ADDRESS)'\s*\)/inet_client_addr()/igs;
+	$str =~ s/SYS_CONTEXT\s*\(\s*'USERENV'\s*,\s*'SERVER_HOST'\s*\)/inet_server_addr()/igs;
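+	# For example, SYS_CONTEXT('USERENV','CURRENT_USER') becomes current_user
+	# and SYS_CONTEXT('USERENV','SERVER_HOST') becomes inet_server_addr().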
 
 	return $str;
 }
@@ -817,55 +703,55 @@
 	my $num_field = '\s*[\d\.]+\s*';
 
 	# SDO_GEOM.RELATE(geom1 IN SDO_GEOMETRY,mask IN VARCHAR2,geom2 IN SDO_GEOMETRY,tol IN NUMBER)
-	$str =~ s/(ST_Relate\s*\($field),$field,($field),($field)\)/$1,$2\)/is;
+	$str =~ s/(ST_Relate\s*\($field),$field,($field),($field)\)/$1,$2\)/igs;
 	# SDO_GEOM.RELATE(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,mask IN VARCHAR2,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_Relate\s*\($field),$field,$field,($field),$field\)/$1,$2\)/is;
+	$str =~ s/(ST_Relate\s*\($field),$field,$field,($field),$field\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_AREA(geom IN SDO_GEOMETRY, tol IN NUMBER [, unit IN VARCHAR2])
 	# SDO_GEOM.SDO_AREA(geom IN SDO_GEOMETRY,dim IN SDO_DIM_ARRAY [, unit IN VARCHAR2])
-	$str =~ s/(ST_Area\s*\($field),[^\)]+\)/$1\)/is;
+	$str =~ s/(ST_Area\s*\($field),[^\)]+\)/$1\)/igs;
 	# SDO_GEOM.SDO_BUFFER(geom IN SDO_GEOMETRY,dist IN NUMBER, tol IN NUMBER [, params IN VARCHAR2])
-	$str =~ s/(ST_Buffer\s*\($field,$num_field),[^\)]+\)/$1\)/is;
+	$str =~ s/(ST_Buffer\s*\($field,$num_field),[^\)]+\)/$1\)/igs;
 	# SDO_GEOM.SDO_BUFFER(geom IN SDO_GEOMETRY,dim IN SDO_DIM_ARRAY,dist IN NUMBER [, params IN VARCHAR2])
-	$str =~ s/(ST_Buffer\s*\($field),$field,($num_field)[^\)]*\)/$1,$2\)/is;
+	$str =~ s/(ST_Buffer\s*\($field),$field,($num_field)[^\)]*\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_CENTROID(geom1 IN SDO_GEOMETRY,tol IN NUMBER)
 	# SDO_GEOM.SDO_CENTROID(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_Centroid\s*\($field),$field\)/$1\)/is;
+	$str =~ s/(ST_Centroid\s*\($field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_CONVEXHULL(geom1 IN SDO_GEOMETRY,tol IN NUMBER)
 	# SDO_GEOM.SDO_CONVEXHULL(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_ConvexHull\s*\($field),$field\)/$1\)/is;
+	$str =~ s/(ST_ConvexHull\s*\($field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_DIFFERENCE(geom1 IN SDO_GEOMETRY,geom2 IN SDO_GEOMETRY,tol IN NUMBER)
-	$str =~ s/(ST_Difference\s*\($field,$field),$field\)/$1\)/is;
+	$str =~ s/(ST_Difference\s*\($field,$field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_DIFFERENCE(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_Difference\s*\($field),$field,($field),$field\)/$1,$2\)/is;
+	$str =~ s/(ST_Difference\s*\($field),$field,($field),$field\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_DISTANCE(geom1 IN SDO_GEOMETRY,geom2 IN SDO_GEOMETRY,tol IN NUMBER [, unit IN VARCHAR2])
-	$str =~ s/(ST_Distance\s*\($field,$field),($num_field)[^\)]*\)/$1\)/is;
+	$str =~ s/(ST_Distance\s*\($field,$field),($num_field)[^\)]*\)/$1\)/igs;
 	# SDO_GEOM.SDO_DISTANCE(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY [, unit IN VARCHAR2])
-	$str =~ s/(ST_Distance\s*\($field),$field,($field),($field)[^\)]*\)/$1,$2\)/is;
+	$str =~ s/(ST_Distance\s*\($field),$field,($field),($field)[^\)]*\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_INTERSECTION(geom1 IN SDO_GEOMETRY,geom2 IN SDO_GEOMETRY,tol IN NUMBER)
-	$str =~ s/(ST_Intersection\s*\($field,$field),$field\)/$1\)/is;
+	$str =~ s/(ST_Intersection\s*\($field,$field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_INTERSECTION(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_Intersection\s*\($field),$field,($field),$field\)/$1,$2\)/is;
+	$str =~ s/(ST_Intersection\s*\($field),$field,($field),$field\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_LENGTH(geom IN SDO_GEOMETRY, dim IN SDO_DIM_ARRAY [, unit IN VARCHAR2])
 	# SDO_GEOM.SDO_LENGTH(geom IN SDO_GEOMETRY, tol IN NUMBER [, unit IN VARCHAR2])
-	$str =~ s/(ST_Length\s*\($field),($field)[^\)]*\)/$1\)/is;
+	$str =~ s/(ST_Length\s*\($field),($field)[^\)]*\)/$1\)/igs;
 	# SDO_GEOM.SDO_POINTONSURFACE(geom1 IN SDO_GEOMETRY, tol IN NUMBER)
 	# SDO_GEOM.SDO_POINTONSURFACE(geom1 IN SDO_GEOMETRY, dim1 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_PointOnSurface\s*\($field),$field\)/$1\)/is;
+	$str =~ s/(ST_PointOnSurface\s*\($field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_UNION(geom1 IN SDO_GEOMETRY, geom2 IN SDO_GEOMETRY, tol IN NUMBER)
-	$str =~ s/(ST_Union\s*\($field,$field),$field\)/$1\)/is;
+	$str =~ s/(ST_Union\s*\($field,$field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_UNION(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_Union\s*\($field),$field,($field),$field\)/$1,$2\)/is;
+	$str =~ s/(ST_Union\s*\($field),$field,($field),$field\)/$1,$2\)/igs;
 	# SDO_GEOM.SDO_XOR(geom1 IN SDO_GEOMETRY,geom2 IN SDO_GEOMETRY, tol IN NUMBER)
-	$str =~ s/(ST_SymDifference\s*\($field,$field),$field\)/$1\)/is;
+	$str =~ s/(ST_SymDifference\s*\($field,$field),$field\)/$1\)/igs;
 	# SDO_GEOM.SDO_XOR(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_SymDifference\s*\($field),$field,($field),$field\)/$1,$2\)/is;
+	$str =~ s/(ST_SymDifference\s*\($field),$field,($field),$field\)/$1,$2\)/igs;
 	# SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT(geom1 IN SDO_GEOMETRY, tol IN NUMBER)
 	# SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT(geom1 IN SDO_GEOMETRY, dim1 IN SDO_DIM_ARRAY)
-	$str =~ s/(ST_IsValidReason\s*\($field),$field\)/$1\)/is;
+	$str =~ s/(ST_IsValidReason\s*\($field),$field\)/$1\)/igs;
 	# SDO_GEOM.WITHIN_DISTANCE(geom1 IN SDO_GEOMETRY,dim1 IN SDO_DIM_ARRAY,dist IN NUMBER,geom2 IN SDO_GEOMETRY,dim2 IN SDO_DIM_ARRAY [, units IN VARCHAR2])
-	$str =~ s/(ST_DWithin\s*\($field),$field,($field),($field),($field)[^\)]*\)/$1,$3,$2\)/is;
+	$str =~ s/(ST_DWithin\s*\($field),$field,($field),($field),($field)[^\)]*\)/$1,$3,$2\)/igs;
 	# SDO_GEOM.WITHIN_DISTANCE(geom1 IN SDO_GEOMETRY,dist IN NUMBER,geom2 IN SDO_GEOMETRY, tol IN NUMBER [, units IN VARCHAR2])
-	$str =~ s/(ST_DWithin\s*\($field)(,$field)(,$field),($field)[^\)]*\)/$1$3$2\)/is;
+	$str =~ s/(ST_DWithin\s*\($field)(,$field)(,$field),($field)[^\)]*\)/$1$3$2\)/igs;
 
 	return $str;
 }
@@ -875,45 +761,45 @@
 	my $str = shift;
 
 	# SDO_CONTAINS(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_CONTAINS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Contains($1)/is;
-	$str =~ s/SDO_CONTAINS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Contains($1)/is;
-	$str =~ s/SDO_CONTAINS\s*\(([^\)]+)\)/ST_Contains($1)/is;
+	$str =~ s/SDO_CONTAINS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Contains($1)/igs;
+	$str =~ s/SDO_CONTAINS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Contains($1)/igs;
+	$str =~ s/SDO_CONTAINS\s*\(([^\)]+)\)/ST_Contains($1)/igs;
 	# SDO_RELATE(geometry1, geometry2, param) = 'TRUE'
-	$str =~ s/SDO_RELATE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Relate($1)/is;
-	$str =~ s/SDO_RELATE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Relate($1)/is;
-	$str =~ s/SDO_RELATE\s*\(([^\)]+)\)/ST_Relate($1)/is;
+	$str =~ s/SDO_RELATE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Relate($1)/igs;
+	$str =~ s/SDO_RELATE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Relate($1)/igs;
+	$str =~ s/SDO_RELATE\s*\(([^\)]+)\)/ST_Relate($1)/igs;
 	# SDO_WITHIN_DISTANCE(geometry1, aGeom, params) = 'TRUE'
-	$str =~ s/SDO_WITHIN_DISTANCE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_DWithin($1)/is;
-	$str =~ s/SDO_WITHIN_DISTANCE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_DWithin($1)/is;
-	$str =~ s/SDO_WITHIN_DISTANCE\s*\(([^\)]+)\)/ST_DWithin($1)/is;
+	$str =~ s/SDO_WITHIN_DISTANCE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_DWithin($1)/igs;
+	$str =~ s/SDO_WITHIN_DISTANCE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_DWithin($1)/igs;
+	$str =~ s/SDO_WITHIN_DISTANCE\s*\(([^\)]+)\)/ST_DWithin($1)/igs;
 	# SDO_TOUCH(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_TOUCH\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Touches($1)/is;
-	$str =~ s/SDO_TOUCH\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Touches($1)/is;
-	$str =~ s/SDO_TOUCH\s*\(([^\)]+)\)/ST_Touches($1)/is;
+	$str =~ s/SDO_TOUCH\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Touches($1)/igs;
+	$str =~ s/SDO_TOUCH\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Touches($1)/igs;
+	$str =~ s/SDO_TOUCH\s*\(([^\)]+)\)/ST_Touches($1)/igs;
 	# SDO_OVERLAPS(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_OVERLAPS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Overlaps($1)/is;
-	$str =~ s/SDO_OVERLAPS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Overlaps($1)/is;
-	$str =~ s/SDO_OVERLAPS\s*\(([^\)]+)\)/ST_Overlaps($1)/is;
+	$str =~ s/SDO_OVERLAPS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Overlaps($1)/igs;
+	$str =~ s/SDO_OVERLAPS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Overlaps($1)/igs;
+	$str =~ s/SDO_OVERLAPS\s*\(([^\)]+)\)/ST_Overlaps($1)/igs;
 	# SDO_INSIDE(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_INSIDE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Within($1)/is;
-	$str =~ s/SDO_INSIDE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Within($1)/is;
-	$str =~ s/SDO_INSIDE\s*\(([^\)]+)\)/ST_Within($1)/is;
+	$str =~ s/SDO_INSIDE\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Within($1)/igs;
+	$str =~ s/SDO_INSIDE\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Within($1)/igs;
+	$str =~ s/SDO_INSIDE\s*\(([^\)]+)\)/ST_Within($1)/igs;
 	# SDO_EQUAL(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_EQUAL\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Equals($1)/is;
-	$str =~ s/SDO_EQUAL\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Equals($1)/is;
-	$str =~ s/SDO_EQUAL\s*\(([^\)]+)\)/ST_Equals($1)/is;
+	$str =~ s/SDO_EQUAL\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Equals($1)/igs;
+	$str =~ s/SDO_EQUAL\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Equals($1)/igs;
+	$str =~ s/SDO_EQUAL\s*\(([^\)]+)\)/ST_Equals($1)/igs;
 	# SDO_COVERS(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_COVERS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Covers($1)/is;
-	$str =~ s/SDO_COVERS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Covers($1)/is;
-	$str =~ s/SDO_COVERS\s*\(([^\)]+)\)/ST_Covers($1)/is;
+	$str =~ s/SDO_COVERS\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Covers($1)/igs;
+	$str =~ s/SDO_COVERS\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Covers($1)/igs;
+	$str =~ s/SDO_COVERS\s*\(([^\)]+)\)/ST_Covers($1)/igs;
 	# SDO_COVEREDBY(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_COVEREDBY\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_CoveredBy($1)/is;
-	$str =~ s/SDO_COVEREDBY\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_CoveredBy($1)/is;
-	$str =~ s/SDO_COVEREDBY\s*\(([^\)]+)\)/ST_CoveredBy($1)/is;
+	$str =~ s/SDO_COVEREDBY\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_CoveredBy($1)/igs;
+	$str =~ s/SDO_COVEREDBY\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_CoveredBy($1)/igs;
+	$str =~ s/SDO_COVEREDBY\s*\(([^\)]+)\)/ST_CoveredBy($1)/igs;
 	# SDO_ANYINTERACT(geometry1, geometry2) = 'TRUE'
-	$str =~ s/SDO_ANYINTERACT\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Intersects($1)/is;
-	$str =~ s/SDO_ANYINTERACT\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Intersects($1)/is;
-	$str =~ s/SDO_ANYINTERACT\s*\(([^\)]+)\)/ST_Intersects($1)/is;
+	$str =~ s/SDO_ANYINTERACT\s*\((.*?)\)\s*=\s*[']+TRUE[']+/ST_Intersects($1)/igs;
+	$str =~ s/SDO_ANYINTERACT\s*\((.*?)\)\s*=\s*[']+FALSE[']+/NOT ST_Intersects($1)/igs;
+	$str =~ s/SDO_ANYINTERACT\s*\(([^\)]+)\)/ST_Intersects($1)/igs;
 
 	return $str;
 }
@@ -924,7 +810,7 @@
 {
 	my $str = shift;
 
-	my @strings = split(/\s*\|\|\s*/s, $str);
+	my @strings = split(/\|\|/s, $str);
 
 	my @params = ();
 	my $pattern = '';
@@ -1047,19 +933,6 @@
 		$str =~ s/\b$t\b/$Ora2Pg::TYPE{$t}/igs;
 	}
 
-	# Replace local type ref cursor
-	my %localtype = ();
-	my $i = 0;
-	while ($str =~ s/\bTYPE\s+([^\s]+)\s+IS\s+REF\s+CURSOR\s*;/\%LOCALTYPE$i\%/is) {
-		$localtype{$i} = "TYPE $1 IS REF CURSOR;";
-		my $local_type = $1;
-		if ($str =~ s/\b([^\s]+)\s+$local_type\s*;/$1 REFCURSOR;/is) {
-			$str =~ s/\%LOCALTYPE$i\%//is;
-		}
-		$i++;
-	}
-	$str =~ s/\%LOCALTYPE(\d+)\%/$localtype{$1}/gs;
-
         return $str;
 }
 
@@ -1079,7 +952,7 @@
 	# Default cost is the testing cost, meaning it must at least be tested
 	my $cost = $FCT_TEST_SCORE;
 	# When evaluating queries size must not be included here
-	if ($type eq 'QUERY' || $type eq 'VIEW') {
+	if ($type eq 'QUERY') {
 		$cost = 0;
 	}
 	$cost_details{'TEST'} = $cost;
@@ -1087,7 +960,7 @@
 	# Set cost following code length
 	my $cost_size = int(length($str)/$SIZE_SCORE) || 1;
 	# When evaluating queries size must not be included here
-	if ($type eq 'QUERY' || $type eq 'VIEW') {
+	if ($type eq 'QUERY') {
 		$cost_size = 0;
 	}
 	$cost += $cost_size;
@@ -1176,8 +1049,8 @@
 	$cost_details{'PLUNIT'} += $n;
 	$n = () = $str =~ m/ADD_MONTHS/igs;
 	$cost_details{'ADD_MONTHS'} += $n;
-	$n = () = $str =~ m/LAST_DAY/igs;
-	$cost_details{'LAST_DAY'} += $n;
+	$n = () = $str =~ m/LAST_DATE/igs;
+	$cost_details{'LAST_DATE'} += $n;
 	$n = () = $str =~ m/NEXT_DAY/igs;
 	$cost_details{'NEXT_DAY'} += $n;
 	$n = () = $str =~ m/MONTHS_BETWEEN/igs;
@@ -1193,19 +1066,6 @@
 	$cost_details{'MDSYS'} += $n;
 	$n = () = $str =~ m/MERGE\sINTO/igs;
 	$cost_details{'MERGE'} += $n;
-	$n = () = $str =~ m/\bCONTAINS\(/igs;
-	$cost_details{'CONTAINS'} += $n;
-	$n = () = $str =~ m/\bSCORE\((?:.*)?\bCONTAINS\(/igs;
-	$cost_details{'SCORE'} += $n;
-	$n = () = $str =~ m/CONTAINS\((?:.*)?\bFUZZY\(/igs;
-	$cost_details{'FUZZY'} += $n;
-	$n = () = $str =~ m/CONTAINS\((?:.*)?\bNEAR\(/igs;
-	$cost_details{'NEAR'} += $n;
-	$n = () = $str =~ m/TO_CHAR\([^,\)]+\)/igs;
-	$cost_details{'TO_CHAR'} += $n;
-	$n = () = $str =~ m/\s+ANYDATA/igs;
-	$cost_details{'ANYDATA'} += $n;
-
 
 	foreach my $f (@ORA_FUNCTIONS) {
 		if ($str =~ /\b$f\b/igs) {
@@ -1414,10 +1274,10 @@
 	$str =~ s/\bFROM_BASE64\(\s*([^\(\),]+)\s*\)/decode(($1)::bytea, 'base64')/igs;
 	$str =~ s/\bHEX\(\s*([^\(\),]+)\s*\)/upper(encode($1::bytea, 'hex'))/igs;
 	$str =~ s/\bINSTR\s*\(\s*([^,]+),\s*('[^']+')\s*\)/position($2 in $1)/igs;
-	$str =~ s/\bLOCATE\(\s*([^\(\),]+)\s*,\s*([^\(\),]+)\s*,\s*([^\(\),]+)\s*\)/position($1 in substr($2, $3)) + $3 - 1/igs;
+	$str =~ s/\bLOCATE\(\s*([^\(\),]+)\s*,\s*([^\(\),]+)\s*,\s*([^\(\),]+)\s*\)/position($1 in substring ($2 from $3)) + $3 - 1/igs;
 	$str =~ s/\bLOCATE\(\s*([^\(\),]+)\s*,\s*([^\(\),]+)\s*\)/position($1 in $2)/igs;
 	$str =~ s/\bLCASE\(/lower\(/igs;
-	$str =~ s/\bMID\(/substr\(/igs;
+	$str =~ s/\bMID\(/substring\(/igs;
 	$str =~ s/\bORD\(/ascii\(/igs;
 	$str =~ s/\bQUOTE\(/quote_literal\(/igs;
 	$str =~ s/\bSPACE\(\s*([^\(\),]+)\s*\)/repeat(' ', $1)/igs;
@@ -1431,7 +1291,7 @@
 	$str =~ s/\bRLIKE/REGEXP/igs;
 	$str =~ s/\bSTD\(/STDDEV_POP\(/igs;
 	$str =~ s/\bSTDDEV\(/STDDEV_POP\(/igs;
-	$str =~ s/\bUUID\(/$class->{uuid_function}\(/igs;
+	$str =~ s/\bUUID\(/uuid_generate_v1\(/igs;
 	$str =~ s/\bNOT REGEXP BINARY/\!\~/igs;
 	$str =~ s/\bREGEXP BINARY/\~/igs;
 	$str =~ s/\bNOT REGEXP/\!\~\*/igs;
@@ -1451,13 +1311,13 @@
 	$str =~ s/\bIFNULL\(\s*([^,]+)\s*,\s*([^\)]+\s*)\)/COALESCE($1, $2)/igs;
 
 	# Rewrite while loop
-	$str =~ s/\bWHILE\s+(.*?)\bEND WHILE\s*;/WHILE $1END LOOP;/igs;
+	$str =~ s/\bWHILE\s+(.*?)END WHILE\s*;/WHILE $1END LOOP;/igs;
 	$str =~ s/\bWHILE\s+(.*?)DO\b/WHILE $1LOOP/igs;
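 	# For example, "WHILE i < 10 DO ... END WHILE;" becomes
 	# "WHILE i < 10 LOOP ... END LOOP;" after the two rewrites above.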
 
 	# Rewrite REPEAT loop
 	my %repl_repeat = ();
 	$i = 0;
-	while ($str =~ s/\bREPEAT\s+(.*?)\bEND REPEAT\s*;/%REPREPEATLBL$i%/igs) {
+	while ($str =~ s/\bREPEAT\s+(.*?)END REPEAT\s*;/%REPREPEATLBL$i%/igs) {
 		my $code = $1;
 		$code =~ s/\bUNTIL(.*)//;
 		$repl_repeat{$i} = "LOOP ${code}EXIT WHEN $1;\nEND LOOP;";
@@ -1675,234 +1535,6 @@
 	return $cost, %cost_details;
 }
 
-sub replace_right_outer_join
-{
-	my $str = shift;
-
-	# process simple form of outer join
-	my $nbouter = $str =~ /(\(\+\)\s*(?:!=|<>|>=|<=|=|>|<|NOT LIKE|LIKE))/igs;
-	# Check that we don't have left outer join too
-	if ($nbouter >= 1 && $str !~ /(?:!=|<>|>=|<=|=|>|<|NOT LIKE|LIKE)\s*[^\s]+\s*\(\+\)/i) {
-		# Extract the FROM clause
-		$str =~ s/(.*)\bFROM\s+(.*?)\s+WHERE\s+(.*?)$/$1FROM FROM_CLAUSE WHERE $3/is;
-		my $from_clause = $2;
-		$from_clause =~ s/"//gs;
-		my @tables = split(/\s*,\s*/, $from_clause);
-		# Set a hash for alias to table mapping
-		my %from_clause_list = ();
-		my %main_from_tables = ();
-		foreach my $table (@tables) {
-			$table =~ s/^\s+//s;
-			$table =~ s/\s+$//s;
-			my ($t, $alias) = split(/\s+/, lc($table));
-			$alias = $t if (!$alias);
-			$from_clause_list{$alias} = $t;
-		}
-
-		# Extract all Oracle's outer join syntax from the where clause
-		my @outer_clauses = ();
-		my %final_from_clause = ();
-		my @tmp_from_list = ();
-		my $start_query = '';
-		my $end_query = '';
-		if ($str =~ s/^(.*FROM FROM_CLAUSE WHERE)//is) {
-			$start_query = $1;
-		}
-		if ($str =~ s/\s+((?:GROUP BY|ORDER BY).*)$//is) {
-			$end_query = $1;
-		}
-		my @predicat = split(/\s+(AND|OR)\s+/i, $str);
-		my $id = 0;
-		# Process only predicates with an obsolete join syntax (+) for now
-		for (my $i = 0; $i <= $#predicat; $i+=2) {
-			next if ($predicat[$i] !~ /\(\+\)/);
-			$predicat[$i] =~ s/(.*)/WHERE_CLAUSE$id /is;
-			my $where_clause = $1;
-			$where_clause =~ s/"//gs;
-			$where_clause =~ s/^\s+//s;
-			$where_clause =~ s/[\s;]+$//s;
-			$where_clause =~ s/\s*\(\+\)//gs;
-			# Split the predicate to retrieve left part, operator and right part
-			my ($l, $o, $r) = split(/\s*(=|LIKE)\s*/i, lc($where_clause));
-			# When the parts of the clause are not single fields, move them
-			# back to their place in the WHERE clause and go to the next predicate
-			if (($l !~ /^[^\.]+\.[^\s]+$/) || ($r !~ /^[^\.]+\.[^\s]+$/)) {
-				$predicat[$i] =~ s/WHERE_CLAUSE$id / $l $o $r /s;
-				next;
-			}
-			$id++;
-			# Extract the tablename part of the left clause
-			my $lbl1 = '';
-			my $table_decl1 = $l;
-			if ($l =~ /^([^\.]+)\..*/) {
-				$lbl1 = $1;
-				$table_decl1 = $from_clause_list{$1};
-				$table_decl1 .= " $1" if ($1 ne $from_clause_list{$1});
-			}
-			# Extract the tablename part of the right clause
-			my $lbl2 = '';
-			my $table_decl2 = $r;
-			if ($r =~ /^([^\.]+)\..*/) {
-				$lbl2 = $1;
-				$table_decl2 = $from_clause_list{$1};
-				$table_decl2 .= " $1" if ($1 ne $from_clause_list{$1});
-			}
-			# When this is the first join parsed, add the left table name
-			# first, then the outer join with the right table
-			if (scalar keys %final_from_clause == 0) {
-				$from_clause = $table_decl1;
-				$final_from_clause{"$lbl1;$lbl2"}{position} = $i;
-				push(@{$final_from_clause{"$lbl1;$lbl2"}{clause}{$table_decl2}}, "$l $o $r");
-			} else {
-				$final_from_clause{"$lbl1;$lbl2"}{position} = $i;
-				push(@{$final_from_clause{"$lbl1;$lbl2"}{clause}{$table_decl1}}, "$l $o $r");
-			}
-		}
-		$str = $start_query . join(' ', @predicat) . ' ' . $end_query;
-
-		# Remove part from the WHERE clause that will be moved into the FROM clause
-		$str =~ s/\s*(AND\s+)?WHERE_CLAUSE\d+ / /igs;
-		$str =~ s/WHERE\s+(AND|OR)\s+/WHERE /is;
-		$str =~ s/WHERE[\s;]+$/;/i;
-		$str =~ s/(\s+)WHERE\s+(ORDER|GROUP)\s+BY/$1$2 BY/is;
-
-		foreach my $t (sort { $final_from_clause{$a}{position} <=> $final_from_clause{$b}{position} } keys %final_from_clause) {
-			foreach my $j (sort keys %{$final_from_clause{$t}{clause}}) {
-				$from_clause .= " RIGHT OUTER JOIN $j ON (" .  join(' AND ', @{$final_from_clause{$t}{clause}{$j}}) . ")"; 
-			}
-		}
-
-		# Append tables that were not involved in an outer join to the FROM clause
-		foreach my $a (keys %from_clause_list) {
-			my $table_decl = "$from_clause_list{$a}";
-			$table_decl .= " $a" if ($a ne $from_clause_list{$a});
-			if ($from_clause !~ /\b$table_decl\b/) {
-				#$from_clause = "$table_decl, " . $from_clause;
-			}
-		}
-
-		$str =~ s/FROM FROM_CLAUSE/FROM $from_clause/s;
-		$str =~ s/[;]*\s*$/;/s;
-	}
-
-	return $str;
-}
-
-sub replace_left_outer_join
-{
-	my $str = shift;
-
-	# process simple form of outer join
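-	# For example, "FROM emp e, dept d WHERE e.dept_id = d.id(+)" is meant to be
-	# rewritten as "FROM emp e LEFT OUTER JOIN dept d ON (e.dept_id = d.id)";
-	# table and column names are illustrative.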
-	my $nbouter = $str =~ /((?:!=|<>|>=|<=|=|>|<|NOT LIKE|LIKE)\s*[^\s]+\s*\(\+\))/igs;
-	# Check that we don't have right outer join too
-	if ($nbouter >= 1 && $str !~ /\(\+\)\s*(?:!=|<>|>=|<=|=|>|<|NOT LIKE|LIKE)/i) {
-		# Extract the FROM clause
-		$str =~ s/(.*)\bFROM\s+(.*?)\s+WHERE\s+(.*?)$/$1FROM FROM_CLAUSE WHERE $3/is;
-		my $from_clause = $2;
-		$from_clause =~ s/"//gs;
-
-		my @tables = split(/\s*,\s*/, $from_clause);
-		# Set a hash for alias to table mapping
-		my %from_clause_list = ();
-		my %main_from_tables = ();
-		foreach my $table (@tables) {
-			$table =~ s/^\s+//s;
-			$table =~ s/\s+$//s;
-			my ($t, $alias) = split(/\s+/, lc($table));
-			$alias = $t if (!$alias);
-			$from_clause_list{$alias} = $t;
-		}
-
-		# Extract all Oracle's outer join syntax from the where clause
-		my @outer_clauses = ();
-		my %final_from_clause = ();
-		my @tmp_from_list = ();
-		my $start_query = '';
-		my $end_query = '';
-		if ($str =~ s/^(.*FROM FROM_CLAUSE WHERE)//is) {
-			$start_query = $1;
-		}
-		if ($str =~ s/\s+((?:GROUP BY|ORDER BY).*)$//is) {
-			$end_query = $1;
-		}
-		my @predicat = split(/\s+(AND|OR)\s+/i, $str);
-		my $id = 0;
-		# Process only predicates with an obsolete join syntax (+) for now
-		for (my $i = 0; $i <= $#predicat; $i+=2) {
-			next if ($predicat[$i] !~ /\(\+\)/);
-			$predicat[$i] =~ s/(.*)/WHERE_CLAUSE$id /is;
-			my $where_clause = $1;
-			$where_clause =~ s/"//gs;
-			$where_clause =~ s/^\s+//s;
-			$where_clause =~ s/[\s;]+$//s;
-			$where_clause =~ s/\s*\(\+\)//gs;
-			# Split the predicate to retrieve left part, operator and right part
-			my ($l, $o, $r) = split(/\s*(=|LIKE)\s*/i, lc($where_clause));
-			# When the parts of the clause are not single fields, move them
-			# back to their place in the WHERE clause and go to the next predicate
-			if (($l !~ /^[^\.]+\.[^\s]+$/) || ($r !~ /^[^\.]+\.[^\s]+$/)) {
-				$predicat[$i] =~ s/WHERE_CLAUSE$id / $l $o $r /s;
-				next;
-			}
-			$id++;
-			# Extract the tablename part of the left clause
-			my $lbl1 = '';
-			my $table_decl1 = $l;
-			if ($l =~ /^([^\.]+)\..*/) {
-				$lbl1 = $1;
-				$table_decl1 = $from_clause_list{$1};
-				$table_decl1 .= " $1" if ($1 ne $from_clause_list{$1});
-			}
-			# Extract the tablename part of the right clause
-			my $lbl2 = '';
-			my $table_decl2 = $r;
-			if ($r =~ /^([^\.]+)\..*/) {
-				$lbl2 = $1;
-				$table_decl2 = $from_clause_list{$1};
-				$table_decl2 .= " $1" if ($1 ne $from_clause_list{$1});
-			}
-			# When this is the first join parsed, add the left table name
-			# first, then the outer join with the right table
-			if (scalar keys %final_from_clause == 0) {
-				$from_clause = $table_decl1;
-				$final_from_clause{"$lbl1;$lbl2"}{position} = $i;
-				push(@{$final_from_clause{"$lbl1;$lbl2"}{clause}{$table_decl2}}, "$l $o $r");
-			} else {
-				$final_from_clause{"$lbl1;$lbl2"}{position} = $i;
-				push(@{$final_from_clause{"$lbl1;$lbl2"}{clause}{$table_decl1}}, "$l $o $r");
-			}
-		}
-		$str = $start_query . join(' ', @predicat) . ' ' . $end_query;
-
-		# Remove part from the WHERE clause that will be moved into the FROM clause
-		$str =~ s/\s*(AND\s+)?WHERE_CLAUSE\d+ / /igs;
-		$str =~ s/WHERE\s+(AND|OR)\s+/WHERE /is;
-		$str =~ s/WHERE[\s;]+$/;/i;
-		$str =~ s/(\s+)WHERE\s+(ORDER|GROUP)\s+BY/$1$2 BY/is;
-
-		foreach my $t (sort { $final_from_clause{$a}{position} <=> $final_from_clause{$b}{position} } keys %final_from_clause) {
-			foreach my $j (sort keys %{$final_from_clause{$t}{clause}}) {
-				$from_clause .= " LEFT OUTER JOIN $j ON (" .  join(' AND ', @{$final_from_clause{$t}{clause}{$j}}) . ")"; 
-			}
-		}
-
-		# Append tables that were not involved in an outer join to the FROM clause
-		foreach my $a (keys %from_clause_list) {
-			my $table_decl = "$from_clause_list{$a}";
-			$table_decl .= " $a" if ($a ne $from_clause_list{$a});
-			if ($from_clause !~ /\b$table_decl\b/) {
-				#$from_clause = "$table_decl, " . $from_clause;
-			}
-		}
-
-		$str =~ s/FROM FROM_CLAUSE/FROM $from_clause/s;
-		$str =~ s/[;]*\s*$/;/s;
-	}
-
-	return $str;
-}
-
-
 1;
 
 __END__
@@ -1915,7 +1547,7 @@
 
 =head1 COPYRIGHT
 
-Copyright (c) 2000-2017 Gilles Darold - All rights reserved.
+Copyright (c) 2000-2016 Gilles Darold - All rights reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the same terms as Perl itself.
diff -Nru ora2pg-18.0/lib/Ora2Pg.pm ora2pg-17.6/lib/Ora2Pg.pm
--- ora2pg-18.0/lib/Ora2Pg.pm	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/lib/Ora2Pg.pm	2016-11-18 05:45:49.000000000 +0800
@@ -4,7 +4,7 @@
 # Name     : Ora2Pg.pm
 # Language : Perl
 # Authors  : Gilles Darold, gilles _AT_ darold _DOT_ net
-# Copyright: Copyright (c) 2000-2017 : Gilles Darold - All rights reserved -
+# Copyright: Copyright (c) 2000-2016 : Gilles Darold - All rights reserved -
 # Function : Main module used to export Oracle database schema to PostgreSQL
 # Usage    : See documentation in this file with perldoc.
 #------------------------------------------------------------------------------
@@ -41,7 +41,7 @@
 #set locale to LC_NUMERIC C
 setlocale(LC_NUMERIC,"C");
 
-$VERSION = '18.0';
+$VERSION = '17.6b';
 $PSQL = $ENV{PLSQL} || 'psql';
 
 $| = 1;
@@ -58,8 +58,8 @@
 # Exclude tables generated by partition logging, materialized view logs, statistics on spatial indexes,
 # spatial index tables, sequence index tables, interMedia Text index tables and Unified Audit tables.
 # LogMiner, Oracle Advanced Replication, hash table used by loadjava.
-our @EXCLUDED_TABLES = ('USLOG\$_.*', 'MLOG\$_.*', 'RUPD\$_.*', 'MDXT_.*', 'MDRT_.*', 'MDRS_.*', 'DR\$.*', 'CLI_SWP\$.*', 'LOGMNR\$.*', 'REPCAT\$.*', 'JAVA\$.*','AQ\$.*','BIN\$.*','SDO_GR_.*','.*\$JAVA\$.*','PROF\$.*','TOAD_PLAN_.*','SYS_.*\$');
-our @EXCLUDED_TABLES_8I = ('USLOG$_%', 'MLOG$_%', 'RUPD$_%', 'MDXT_%', 'MDRT_%', 'MDRS_%', 'DR$%', 'CLI_SWP$%', 'LOGMNR$%', 'REPCAT$%', 'JAVA$%', 'AQ$%','BIN$%','%$JAVA$%','PROF$%','TOAD_PLAN_%','SYS_%$');
+our @EXCLUDED_TABLES = ('USLOG\$_.*', 'MLOG\$_.*', 'RUPD\$_.*', 'MDXT_.*', 'MDRT_.*', 'MDRS_.*', 'DR\$.*', 'CLI_SWP\$.*', 'LOGMNR\$.*', 'REPCAT\$.*', 'JAVA\$.*','AQ\$.*','BIN\$.*','SDO_GR_.*','.*\$JAVA\$.*','PROF\$.*','TOAD_PLAN_.*');
+our @EXCLUDED_TABLES_8I = ('USLOG$_%', 'MLOG$_%', 'RUPD$_%', 'MDXT_%', 'MDRT_%', 'MDRS_%', 'DR$%', 'CLI_SWP$%', 'LOGMNR$%', 'REPCAT$%', 'JAVA$%', 'AQ$%','BIN$%','%$JAVA$%','PROF$%','TOAD_PLAN_%');
 
 our @Oracle_tables = qw(
 EVT_CARRIER_CONFIGURATION
@@ -460,6 +460,13 @@
 		} else {
 			$filehdl = new IO::File;
 			$filehdl->open(">$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
+			# Force Perl to use utf8 I/O encoding by default
+			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+				use open ':utf8';
+				$filehdl->binmode(':utf8');
+			} elsif ($self->{'binmode'} =~ /^:/) {
+				$filehdl->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+			}
 		}
 		$filehdl->autoflush(1) if (defined $filehdl && !$self->{compress});
 	}
@@ -500,7 +507,13 @@
 		} else {
 			$self->{fhout} = new IO::File;
 			$self->{fhout}->open(">>$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
-			set_binmode($self->{fhout});
+			# Force Perl to use utf8 I/O encoding by default
+			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+				use open ':utf8';
+				$self->{fhout}->binmode(':utf8');
+			} elsif ($self->{'binmode'} =~ /^:/) {
+				$self->{fhout}->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+			}
 		}
 		if ( $self->{compress} && (($self->{jobs} > 1) || ($self->{oracle_copies} > 1)) ) {
 			die "FATAL: you can't use compressed output with parallel dump\n";
@@ -551,6 +564,13 @@
 			$filehdl = new IO::File;
 			$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't open $outfile: $!\n", 0, 1);
 			$filehdl->autoflush(1);
+			# Force Perl to use utf8 I/O encoding by default
+			if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+				use open ':utf8';
+				$filehdl->binmode(':utf8');
+			} elsif ($self->{'binmode'} =~ /^:/) {
+				$filehdl->binmode($self->{binmode}) or die "FATAL: can't use open layer $self->{binmode} in append_export_file()\n";
+			}
 		}
 	}
 
@@ -814,13 +834,6 @@
 
 	# Use FTS indexes to convert Oracle CONTEXT indexes by default
 	$self->{context_as_trgm} = 0;
-	$self->{fts_index_only}  = 1;
-	$self->{fts_config}      = '';
-	$self->{use_unaccent}    = 1;
-	$self->{use_lower_unaccent} = 1;
-
-	# Enable rewrite of outer join by default.
-	$self->{rewrite_outer_join} = 1;
 
 	# Initialize following the configuration file
 	foreach my $k (sort keys %AConfig) {
@@ -862,9 +875,6 @@
 		$self->{transaction} = 'SET TRANSACTION ISOLATION LEVEL SERIALIZABLE';
 	}
 
-	# Set default function to use for uuid generation
-	$self->{uuid_function} ||= 'uuid_generate_v4';
-
 	# Initial command to execute at Oracle connection
 	$self->{ora_initial_command} ||= '';
 
@@ -908,7 +918,7 @@
 	# Overwrite configuration with all given parameters
 	# and try to preserve backward compatibility
 	foreach my $k (keys %options) {
-		if (($k eq 'allow') && $options{allow}) {
+		if ((lc($k) eq 'allow') && $options{allow}) {
 			$self->{limited} = ();
 			# Syntax: TABLE[regex1 regex2 ...];VIEW[regex1 regex2 ...];glob_regex1 glob_regex2 ...
 			my @allow_vlist = split(/\s*;\s*/, $options{allow});
@@ -919,7 +929,7 @@
 					push(@{$self->{limited}{ALL}}, split(/[\s,]+/, $a) );
 				}
 			}
-		} elsif (($k eq 'exclude') && $options{exclude}) {
+		} elsif ((lc($k) eq 'exclude') && $options{exclude}) {
 			$self->{excluded} = ();
 			# Syntax: TABLE[regex1 regex2 ...];VIEW[regex1 regex2 ...];glob_regex1 glob_regex2 ...
 			my @exclude_vlist = split(/\s*;\s*/, $options{exclude});
@@ -930,16 +940,16 @@
 					push(@{$self->{excluded}{ALL}}, split(/[\s,]+/, $a) );
 				}
 			}
-		} elsif (($k eq 'view_as_table') && $options{view_as_table}) {
+		} elsif ((lc($k) eq 'view_as_table') && $options{view_as_table}) {
 			$self->{view_as_table} = ();
 			push(@{$self->{view_as_table}}, split(/[\s;,]+/, $options{view_as_table}) );
-		} elsif (($k eq 'datasource') && $options{datasource}) {
+		} elsif ((lc($k) eq 'datasource') && $options{datasource}) {
 			$self->{oracle_dsn} = $options{datasource};
-		} elsif (($k eq 'user') && $options{user}) {
+		} elsif ((lc($k) eq 'user') && $options{user}) {
 			$self->{oracle_user} = $options{user};
-		} elsif (($k eq 'password') && $options{password}) {
+		} elsif ((lc($k) eq 'password') && $options{password}) {
 			$self->{oracle_pwd} = $options{password};
-		} elsif (($k eq 'mysql') && $options{mysql}) {
+		} elsif ((lc($k) eq 'mysql') && $options{mysql}) {
 			$self->{is_mysql} = $options{is_mysql};
 		} elsif ($options{$k} ne '') {
 			$self->{"\L$k\E"} = $options{$k};
@@ -1209,9 +1219,6 @@
 		if ($self->{type} eq 'LOAD') {
 			$self->logit("FATAL: with LOAD you must provide an input file\n", 0, 1);
 		}
-		if (!$self->{oracle_dsn} || ($self->{oracle_dsn} =~ /;sid=SIDNAME/)) {
-			$self->logit("FATAL: you must set ORACLE_DSN in ora2pg.conf or use a DDL input file.\n", 0, 1);
-		}
 		# Connect the database
 		if ($self->{oracle_dsn} =~ /dbi:mysql/i) {
 			$self->{dbh} = $self->_mysql_connection();
@@ -1354,14 +1361,7 @@
 
 	$self->logit("Trying to connect to database: $self->{oracle_dsn}\n", 1) if (!$quiet);
 
-	my $dbh = DBI->connect($self->{oracle_dsn}, $self->{oracle_user}, $self->{oracle_pwd},
-		{
-			ora_envhp => 0,
-			LongReadLen=>$self->{longreadlen},
-			LongTruncOk=>$self->{longtruncok},
-			AutoInactiveDestroy => 1
-		}
-	);
+	my $dbh = DBI->connect($self->{oracle_dsn}, $self->{oracle_user}, $self->{oracle_pwd}, {ora_envhp => 0, LongReadLen=>$self->{longreadlen}, LongTruncOk=>$self->{longtruncok}, AutoInactiveDestroy => 1});
 
 	# Check for connection failure
 	if (!$dbh) {
@@ -1476,8 +1476,14 @@
 	# encoding given in the BINMODE configuration directive.
 	# See http://perldoc.perl.org/5.14.2/open.html for values
 	# that can be used. Default is :utf8
-	set_binmode();
-
+	if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
+		use open ':utf8';
+		$self->{'binmode'} = ':utf8';
+	} elsif ($self->{'binmode'} =~ /^:/) {
+		eval "use open '$self->{binmode}';" or die "FATAL: can't use open layer $self->{binmode}\n";
+	} elsif ($self->{'binmode'}) {
+		eval "use open 'encoding($self->{binmode})';" or die "FATAL: can't use open layer :encoding($self->{binmode})\n";
+	}
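+	# For example, setting BINMODE to :encoding(iso-8859-1) in ora2pg.conf selects
+	# that I/O layer here instead of the default :utf8, unless NLS_LANG forces UTF8.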
 	# Set default PostgreSQL client encoding to UTF8
 	if (!$self->{client_encoding} || ($self->{nls_lang} =~ /UTF8/) ) {
 		$self->{client_encoding} = 'UTF8';
@@ -1485,28 +1491,6 @@
 
 }
 
-sub set_binmode
-{
-
-        if ( !$self->{'binmode'} || ($self->{nls_lang} =~ /UTF8/i) ) {
-                use open ':utf8';
-        } elsif ($self->{'binmode'} =~ /^:/) {
-                eval "use open '$self->{binmode}';" or die "FATAL: can't use open layer $self->{binmode}\n";
-        } elsif ($self->{'binmode'}) {
-                eval "use open 'encoding($self->{binmode})';" or die "FATAL: can't use open layer :encoding($self->{binmode})\n";
-        }
-        # Set default PostgreSQL client encoding to UTF8
-        if (!$self->{client_encoding} || ($self->{nls_lang} =~ /UTF8/) ) {
-                $self->{client_encoding} = 'UTF8';
-        }
-
-	if ($#_ == 0) {
-		my $enc = $self->{'binmode'} || 'utf8';
-		$enc =~ s/^://;
-		binmode($_[0], ":encoding($enc)");
-	}
-
-}
 
 # We provide a DESTROY method so that the autoloader doesn't
 # bother trying to find it. We also close the DB connection
@@ -1980,13 +1964,13 @@
 	my $str = shift();
 
 	my $ct = '';
-	my @parts = split(/\b(BEGIN|DECLARE|END\s*(?!IF|LOOP|CASE|INTO|FROM|,)[^;\s]*\s*;)/, $str);
+	my @parts = split(/(BEGIN|DECLARE|END\s*(?!IF|LOOP|CASE|INTO|FROM|,)[^;\s]*\s*;)/, $str);
 	my $code = '';
 	my $other = '';
 	my $i = 0;
 	for (; $i <= $#parts; $i++) {
 		$ct++ if ($parts[$i] =~ /\bBEGIN\b/);
-		$ct-- if ($parts[$i] =~ /\bEND\s*(?!IF|LOOP|CASE|INTO|FROM|,)[^;\s]*\s*;/);
+		$ct-- if ($parts[$i] =~ /END\s*(?!IF|LOOP|CASE|INTO|FROM|,)[^;\s]*\s*;/);
 		if ( ($ct ne '') && ($ct == 0) ) {
 			$code .= $parts[$i];
 			last;
@@ -2017,12 +2001,9 @@
 			columns => ()
 		) };
 		push(@{$self->{tables}{$tb_name}{unique_key}{$1}{columns}}, split(/\s*,\s*/, $3));
-	} elsif ($c =~ /^([^\s]+) CHECK\s*\((.*)\)/i) {
+	} elsif ($c =~ /^([^\s]+) CHECK\s*\(([^\)]+)\)/i) {
 		my %tmp = ($1 => $2);
-		$self->{tables}{$tb_name}{check_constraint}{constraint}{$1}{condition} = $2;
-		if ($c =~ /NOVALIDATE/i) {
-			$self->{tables}{$tb_name}{check_constraint}{constraint}{$1}{validate} = 'NOT VALIDATED';
-		}
+		$self->{tables}{$tb_name}{check_constraint}{constraint}{$1} = $2;
 	} elsif ($c =~ /^([^\s]+) FOREIGN KEY (\([^\)]+\))?\s*REFERENCES ([^\(]+)\(([^\)]+)\)/i) {
 		my $c_name = $1;
 		if ($2) {
@@ -2044,10 +2025,8 @@
 		$deferrable = 'DEFERRABLE' if ($c =~ /DEFERRABLE/);
 		my $deferred = '';
 		$deferred = 'DEFERRED' if ($c =~ /INITIALLY DEFERRED/);
-		my $novalidate = '';
-		$novalidate = 'NOT VALIDATED' if ($c =~ /NOVALIDATE/);
-		# CONSTRAINT_NAME,R_CONSTRAINT_NAME,SEARCH_CONDITION,DELETE_RULE,$deferrable,DEFERRED,R_OWNER,TABLE_NAME,OWNER,UPDATE_RULE,VALIDATED
-		push(@{$self->{tables}{$tb_name}{foreign_key}}, [ ($c_name,'','','',$deferrable,$deferred,'',$tb_name,'','',$novalidate) ]);
+		# CONSTRAINT_NAME,R_CONSTRAINT_NAME,SEARCH_CONDITION,DELETE_RULE,$deferrable,DEFERRED,R_OWNER,TABLE_NAME,OWNER
+		push(@{$self->{tables}{$tb_name}{foreign_key}}, [ ($c_name,'','','',$deferrable,$deferred,'',$tb_name,'') ]);
 	}
 }
 
@@ -2095,284 +2074,237 @@
 
 	my $tid = 0; 
 
-	my @statements = split(/\s*;\s*/, $content);
+	# Remove potential dynamic table creation before parsing
+	while ($content =~ s/'(TRUNCATE|CREATE)\s+(GLOBAL|UNIQUE)?\s*(TEMPORARY)?\s*(TABLE|INDEX)([^']+)'//i) {};
+	while ($content =~ s/'ALTER\s+TABLE\s*([^']+)'//i) {};
 
-	foreach $content (@statements) {
-		$content .= ';';
-		# Remove potential dynamic table creation before parsing
-		$content =~ s/'(TRUNCATE|CREATE)\s+(GLOBAL|UNIQUE)?\s*(TEMPORARY)?\s*(TABLE|INDEX)([^']+)'//is;
-		$content =~ s/'ALTER\s+TABLE\s*([^']+)'//is;
-		if ($content =~ s/TRUNCATE TABLE\s+([^\s;]+)([^;]*);//is) {
-			my $tb_name = $1;
-			$tb_name =~ s/"//gs;
-			if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
-				$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
-				$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-				$tid++;
-				$self->{tables}{$tb_name}{internal_id} = $tid;
-			}
-			$self->{tables}{$tb_name}{truncate_table} = 1;
-		} elsif ($content =~ s/CREATE\s+(GLOBAL)?\s*(TEMPORARY)?\s*TABLE[\s]+([^\s]+)\s+AS\s+([^;]+);//is) {
-			my $tb_name = $3;
-			$tb_name =~ s/"//gs;
-			my $tb_def = $4;
-			$tb_def =~ s/\s+/ /gs;
-			$self->{tables}{$tb_name}{table_info}{type} = 'TEMPORARY ' if ($2);
-			$self->{tables}{$tb_name}{table_info}{type} .= 'TABLE';
-			$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-			$tid++;
-			$self->{tables}{$tb_name}{internal_id} = $tid;
-			$self->{tables}{$tb_name}{table_as} = $tb_def;
-		} elsif ($content =~ s/CREATE\s+(GLOBAL)?\s*(TEMPORARY)?\s*TABLE[\s]+([^\s\(]+)\s*([^;]+);//is) {
-			my $tb_name = $3;
-			my $tb_def  = $4;
-			my $tb_param  = '';
-			$tb_name =~ s/"//gs;
-			$self->{tables}{$tb_name}{table_info}{type} = 'TEMPORARY ' if ($2);
-			$self->{tables}{$tb_name}{table_info}{type} .= 'TABLE';
+	while ($content =~ s/TRUNCATE TABLE\s+([^;]+);//i) {
+		my $tb_name = $1;
+		$tb_name =~ s/"//g;
+		if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
+			$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
 			$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
 			$tid++;
 			$self->{tables}{$tb_name}{internal_id} = $tid;
+		}
+		$self->{tables}{$tb_name}{truncate_table} = 1;
+	}
 
-			($tb_def, $tb_param) = &_split_table_definition($tb_def);
-			my @column_defs = split(/\s*,\s*/, $tb_def);
-			map { s/^\s+//; s/\s+$//; } @column_defs;
-			# Fix split on scale comma, for example NUMBER(9,4)
-			for (my $i = 0; $i <= $#column_defs; $i++) {
-				if ($column_defs[$i] =~ /^\d+/) {
-					$column_defs[$i-1] .= ",$column_defs[$i]";
-					$column_defs[$i] = '';
-				}
-			}
-			# Fix split on multi-column constraints, ex: UNIQUE (last_name,first_name)
-			for (my $i = $#column_defs; $i >= 0; $i--) {
-				if ( ($column_defs[$i] !~ /\s/) || ($column_defs[$i] =~ /^[^\(]+\) REFERENCES/i) || ($column_defs[$i] =~ /^[^\(]+\) USING INDEX/ii)) {
-					$column_defs[$i-1] .= ",$column_defs[$i]";
-					$column_defs[$i] = '';
-				}
-			}
-			my $pos = 0;
-			my $cur_c_name = '';
-			foreach my $c (@column_defs) {
-				next if (!$c);
-				# Remove things that are not possible with postgres
-				$c =~ s/(PRIMARY KEY.*)NOT NULL/$1/is;
-				# Rewrite some parts for easiest/generic parsing
-				$c =~ s/^(PRIMARY KEY|UNIQUE)/CONSTRAINT ora2pg_ukey_$tb_name $1/is;
-				$c =~ s/^(CHECK[^,;]+)DEFERRABLE\s+INITIALLY\s+DEFERRED/$1/is;
-				$c =~ s/^CHECK\b/CONSTRAINT ora2pg_ckey_$tb_name CHECK/is;
-				$c =~ s/^FOREIGN KEY/CONSTRAINT ora2pg_fkey_$tb_name FOREIGN KEY/is;
-				# Get column name
-				if ($c =~ s/^\s*([^\s]+)\s*//s) {
-					my $c_name = $1;
-					$c_name =~ s/"//g;
-					# Retrieve all columns information
-					if (uc($c_name) ne 'CONSTRAINT') {
-						$cur_c_name = $c_name;
-						my $c_type = '';
-						if ($c =~ s/^([^\s\(]+)\s*//s) {
-							$c_type = $1;
-						} else {
-							next;
-						}
-						my $c_length = '';
-						my $c_scale = '';
-						if ($c =~ s/^\(([^\)]+)\)\s*//s) {
-							$c_length = $1;
-							if ($c_length =~ s/\s*,\s*(\d+)\s*//s) {
-								$c_scale = $1;
-							}
-						}
-						my $c_nullable = 1;
-						if ($c =~ s/CONSTRAINT\s*([^\s]+)?\s*NOT NULL//s) {
-							$c_nullable = 0;
-						} elsif ($c =~ s/NOT NULL//) {
-							$c_nullable = 0;
-						}
-
-						if (($c =~ s/(UNIQUE|PRIMARY KEY)\s*\(([^\)]+)\)//is) || ($c =~ s/(UNIQUE|PRIMARY KEY)\s*//is)) {
-							my $pk_name = 'ora2pg_ukey_' . $c_name; 
-							my $cols = $c_name;
-							if ($2) {
-								$cols = $2;
-							}
-							$self->_parse_constraint($tb_name, $c_name, "$pk_name $1 ($cols)");
+	while ($content =~ s/CREATE\s+(GLOBAL)?\s*(TEMPORARY)?\s*TABLE[\s]+([^\s]+)\s+AS\s+([^;]+);//i) {
+		my $tb_name = $3;
+		$tb_name =~ s/"//g;
+		my $tb_def = $4;
+		$tb_def =~ s/\s+/ /g;
+		$self->{tables}{$tb_name}{table_info}{type} = 'TEMPORARY ' if ($2);
+		$self->{tables}{$tb_name}{table_info}{type} .= 'TABLE';
+		$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
+		$tid++;
+		$self->{tables}{$tb_name}{internal_id} = $tid;
+		$self->{tables}{$tb_name}{table_as} = $tb_def;
+	}
 
-						} elsif ( ($c =~ s/CONSTRAINT\s([^\s]+)\sCHECK\s*\(([^\)]+)\)//is) || ($c =~ s/CHECK\s*\(([^\)]+)\)//is) ) {
-							my $pk_name = 'ora2pg_ckey_' . $c_name; 
-							my $chk_search = $1;
-							if ($2) {
-								$pk_name = $1;
-								$chk_search = $2;
-							}
-							$self->_parse_constraint($tb_name, $c_name, "$pk_name CHECK ($chk_search)");
+	while ($content =~ s/CREATE\s+(GLOBAL)?\s*(TEMPORARY)?\s*TABLE[\s]+([^\s\(]+)\s*([^;]+);//i) {
+		my $tb_name = $3;
+		my $tb_def  = $4;
+		my $tb_param  = '';
+		$tb_name =~ s/"//g;
+		$self->{tables}{$tb_name}{table_info}{type} = 'TEMPORARY ' if ($2);
+		$self->{tables}{$tb_name}{table_info}{type} .= 'TABLE';
+		$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
+		$tid++;
+		$self->{tables}{$tb_name}{internal_id} = $tid;
 
-						} elsif ($c =~ s/REFERENCES\s+([^\(]+)\(([^\)]+)\)//is) {
+		($tb_def, $tb_param) = &_split_table_definition($tb_def);
+		my @column_defs = split(/\s*,\s*/, $tb_def);
+		map { s/^\s+//; s/\s+$//; } @column_defs;
+		# Fix split on scale comma, for example NUMBER(9,4)
+		for (my $i = 0; $i <= $#column_defs; $i++) {
+			if ($column_defs[$i] =~ /^\d+/) {
+				$column_defs[$i-1] .= ",$column_defs[$i]";
+				$column_defs[$i] = '';
+			}
+		}
+		# Fix split on multi-column constraints, ex: UNIQUE (last_name,first_name)
+		for (my $i = $#column_defs; $i >= 0; $i--) {
+			if ( ($column_defs[$i] !~ /\s/) || ($column_defs[$i] =~ /^[^\(]+\) REFERENCES/i) || ($column_defs[$i] =~ /^[^\(]+\) USING INDEX/ii)) {
+				$column_defs[$i-1] .= ",$column_defs[$i]";
+				$column_defs[$i] = '';
+			}
+		}
+		my $pos = 0;
+		my $cur_c_name = '';
+		foreach my $c (@column_defs) {
+			next if (!$c);
+			# Remove things that are not possible with postgres
+			$c =~ s/(PRIMARY KEY.*)NOT NULL/$1/i;
+			# Rewrite some parts for easiest/generic parsing
+			$c =~ s/^(PRIMARY KEY|UNIQUE)/CONSTRAINT ora2pg_ukey_$tb_name $1/i;
+			$c =~ s/^(CHECK[^,;]+)DEFERRABLE\s+INITIALLY\s+DEFERRED/$1/i;
+			$c =~ s/^CHECK\b/CONSTRAINT ora2pg_ckey_$tb_name CHECK/i;
+			$c =~ s/^FOREIGN KEY/CONSTRAINT ora2pg_fkey_$tb_name FOREIGN KEY/i;
+			# Get column name
+			if ($c =~ s/^\s*([^\s]+)\s*//) {
+				my $c_name = $1;
+				$c_name =~ s/"//g;
+				# Retrieve all columns information
+				if (uc($c_name) ne 'CONSTRAINT') {
+					$cur_c_name = $c_name;
+					my $c_type = '';
+					if ($c =~ s/^([^\s\(]+)\s*//) {
+						$c_type = $1;
+					} else {
+						next;
+					}
+					my $c_length = '';
+					my $c_scale = '';
+					if ($c =~ s/^\(([^\)]+)\)\s*//) {
+						$c_length = $1;
+						if ($c_length =~ s/\s*,\s*(\d+)\s*//) {
+							$c_scale = $1;
+						}
+					}
+					my $c_nullable = 1;
+					if ($c =~ s/CONSTRAINT\s*([^\s]+)?\s*NOT NULL//) {
+						$c_nullable = 0;
+					} elsif ($c =~ s/NOT NULL//) {
+						$c_nullable = 0;
+					}
 
-							my $pk_name = 'ora2pg_fkey_' . $c_name; 
-							my $chk_search = $1 . "($2)";
-							$chk_search =~ s/\s+//gs;
-							$self->_parse_constraint($tb_name, $c_name, "$pk_name FOREIGN KEY ($c_name) REFERENCES $chk_search");
+					if (($c =~ s/(UNIQUE|PRIMARY KEY)\s*\(([^\)]+)\)//i) || ($c =~ s/(UNIQUE|PRIMARY KEY)\s*//i)) {
+						my $pk_name = 'ora2pg_ukey_' . $c_name; 
+						my $cols = $c_name;
+						if ($2) {
+							$cols = $2;
 						}
+						$self->_parse_constraint($tb_name, $c_name, "$pk_name $1 ($cols)");
 
-						my $auto_incr = 0;
-						if ($c =~ s/\s*AUTO_INCREMENT\s*//is) {
-							$auto_incr = 1;
+					} elsif ( ($c =~ s/CONSTRAINT\s([^\s]+)\sCHECK\s*\(([^\)]+)\)//i) || ($c =~ s/CHECK\s*\(([^\)]+)\)//i) ) {
+						my $pk_name = 'ora2pg_ckey_' . $c_name; 
+						my $chk_search = $1;
+						if ($2) {
+							$pk_name = $1;
+							$chk_search = $2;
 						}
+						$self->_parse_constraint($tb_name, $c_name, "$pk_name CHECK ($chk_search)");
 
-						my $c_default = '';
-						if ($c =~ s/DEFAULT\s+([^\s]+)\s*//is) {
-							if (!$self->{plsql_pgsql}) {
-								$c_default = $1;
-							} else {
-								$c_default = Ora2Pg::PLSQL::convert_plsql_code($self, $1);
-							}
+					} elsif ($c =~ s/REFERENCES\s+([^\(]+)\(([^\)]+)\)//i) {
+
+						my $pk_name = 'ora2pg_fkey_' . $c_name; 
+						my $chk_search = $1 . "($2)";
+						$chk_search =~ s/\s+//g;
+						$self->_parse_constraint($tb_name, $c_name, "$pk_name FOREIGN KEY ($c_name) REFERENCES $chk_search");
+					}
+
+					my $auto_incr = 0;
+					if ($c =~ s/\s*AUTO_INCREMENT\s*//i) {
+						$auto_incr = 1;
+					}
+
+					my $c_default = '';
+					if ($c =~ s/DEFAULT\s+([^\s]+)\s*//) {
+						if (!$self->{plsql_pgsql}) {
+							$c_default = $1;
+						} else {
+							$c_default = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $1);
 						}
-						#COLUMN_NAME, DATA_TYPE, DATA_LENGTH, NULLABLE, DATA_DEFAULT, DATA_PRECISION, DATA_SCALE, CHAR_LENGTH, TABLE_NAME, OWNER
-						push(@{$self->{tables}{$tb_name}{column_info}{$c_name}}, ($c_name, $c_type, $c_length, $c_nullable, $c_default, $c_length, $c_scale, $c_length, $tb_name, '', $pos, $auto_incr));
-					} else {
-						$self->_parse_constraint($tb_name, $cur_c_name, $c);
 					}
+					#COLUMN_NAME, DATA_TYPE, DATA_LENGTH, NULLABLE, DATA_DEFAULT, DATA_PRECISION, DATA_SCALE, CHAR_LENGTH, TABLE_NAME, OWNER
+					push(@{$self->{tables}{$tb_name}{column_info}{$c_name}}, ($c_name, $c_type, $c_length, $c_nullable, $c_default, $c_length, $c_scale, $c_length, $tb_name, '', $pos, $auto_incr));
+				} else {
+					$self->_parse_constraint($tb_name, $cur_c_name, $c);
 				}
-				$pos++;
-			}
-			map {s/^/\t/; s/$/,\n/; } @column_defs;
-			# look for storage information
-			if ($tb_param =~ /TABLESPACE[\s]+([^\s]+)/is) {
-				$self->{tables}{$tb_name}{table_info}{tablespace} = $1;
-				$self->{tables}{$tb_name}{table_info}{tablespace} =~ s/"//gs;
-			}
-			if ($tb_param =~ /PCTFREE\s+(\d+)/is) {
-				$self->{tables}{$tb_name}{table_info}{fillfactor} = $1;
-			}
-			if ($tb_param =~ /\bNOLOGGING\b/is) {
-				$self->{tables}{$tb_name}{table_info}{nologging} = 1;
-			}
-
-		} elsif ($content =~ s/CREATE\s+(UNIQUE|BITMAP)?\s*INDEX\s+([^\s]+)\s+ON\s+([^\s\(]+)\s*\((.*)\)//is) {
-			my $is_unique = $1;
-			my $idx_name = $2;
-			my $tb_name = $3;
-			my $idx_def = $4;
-			$idx_name =~ s/"//gs;
-			$tb_name =~ s/\s+/ /gs;
-			$idx_def =~ s/\s+/ /gs;
-			$idx_def =~ s/\s*nologging//is;
-			$idx_def =~ s/STORAGE\s*\([^\)]+\)\s*//is;
-			$idx_def =~ s/COMPRESS(\s+\d+)?\s*//is;
-			# look for storage information
-			if ($idx_def =~ s/TABLESPACE\s*([^\s]+)\s*//is) {
-				$self->{tables}{$tb_name}{idx_tbsp}{$idx_name} = $1;
-				$self->{tables}{$tb_name}{idx_tbsp}{$idx_name} =~ s/"//gs;
-			}
-			if ($idx_def =~ s/ONLINE\s*//is) {
-				$self->{tables}{$tb_name}{concurrently}{$idx_name} = 1;
-			}
-			if ($idx_def =~ s/INDEXTYPE\s+IS\s+.*SPATIAL_INDEX//is) {
-				$self->{tables}{$tb_name}{spatial}{$idx_name} = 1;
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'SPATIAL INDEX';
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_name} = 'SPATIAL_INDEX';
-			}
-			if ($idx_def =~ s/layer_gtype=([^\s,]+)//is) {
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_constraint} = uc($1);
-			}
-			if ($idx_def =~ s/sdo_indx_dims=(\d)//is) {
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_dims} = $1;
-			}
-			$idx_def =~ s/\)[^\)]*$//s;
-			if ($is_unique eq 'BITMAP') {
-				$is_unique = '';
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_name} = 'BITMAP';
-			}
-			$self->{tables}{$tb_name}{uniqueness}{$idx_name} = $is_unique || '';
-			$idx_def =~ s/SYS_EXTRACT_UTC\s*\(([^\)]+)\)/$1/isg;
-			push(@{$self->{tables}{$tb_name}{indexes}{$idx_name}}, $idx_def);
-			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'NORMAL';
-			if ($idx_def =~ /\(/s) {
-				$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'FUNCTION-BASED';
-			}
-
-			if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
-				$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
-				$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-				$tid++;
-				$self->{tables}{$tb_name}{internal_id} = $tid;
-			}
-
-		} elsif ($content =~ s/ALTER\s+TABLE\s+([^\s]+)\s+ADD\s*\(*\s*(.*)//is) {
-			my $tb_name = $1;
-			$tb_name =~ s/"//g;
-			my $tb_def = $2;
-			#$tb_def =~ s/\s+/ /g;
-			# Oracle allow multiple constraints declaration inside a single ALTER TABLE
-			while ($tb_def =~ s/CONSTRAINT\s+([^\s]+)\s+CHECK\s*(\(.*?\))\s+(ENABLE|DISABLE|VALIDATE|NOVALIDATE|DEFERRABLE|INITIALLY|DEFERRED|USING\s+INDEX|\s+)+([^,]*)//is) {
-				my $constname = $1;
-				my $code = $2;
-				my $states = $3;
-				$tbspace_move = $4;
-				if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
-					$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
-					$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-					$tid++;
-					$self->{tables}{$tb_name}{internal_id} = $tid;
-				}
-				my $validate = '';
-				$validate = ' NOT VALID' if ( $states =~ /NOVALIDATE/is);
-				push(@{$self->{tables}{$tb_name}{alter_table}}, "ADD CONSTRAINT \L$constname\E CHECK $code$validate");
-				if ( $tbspace_move =~ /USING\s+INDEX\s+TABLESPACE\s+([^\s]+)/is) {
-					$tbspace_move = "ALTER INDEX $constname SET TABLESPACE " . lc($1) if ($self->{use_tablespace});
-				} elsif ($tbspace_move =~ /USING\s+INDEX\s+([^\s]+)/is) {
-					$self->{tables}{$tb_name}{alter_table}[-1] .= " USING INDEX " . lc($1);
-				}
-				push(@{$self->{tables}{$tb_name}{alter_index}}, $tbspace_move) if ($tbspace_move);
-				
-			}
-			while ($tb_def =~ s/CONSTRAINT\s+([^\s]+)\s+FOREIGN\s+KEY\s*(\(.*?\)\s+REFERENCES\s+[^\s]+\s*\(.*?\))\s+([^,\)]+)//is) {
-				my $constname = $1;
-				my $other_def = $3;
-				if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
-					$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
-					$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-					$tid++;
-					$self->{tables}{$tb_name}{internal_id} = $tid;
-				}
-				push(@{$self->{tables}{$tb_name}{alter_table}}, "ADD CONSTRAINT \L$constname\E FOREIGN KEY $2");
-				if ($other_def =~ /(ON\s+DELETE\s+(?:NO ACTION|RESTRICT|CASCADE|SET NULL))/is) {
-					$self->{tables}{$tb_name}{alter_table}[-1] .= " $1";
-				}
-				if ($other_def =~ /(ON\s+UPDATE\s+(?:NO ACTION|RESTRICT|CASCADE|SET NULL))/is) {
-					$self->{tables}{$tb_name}{alter_table}[-1] .= " $1";
-				}
-				my $validate = '';
-				$validate = ' NOT VALID' if ( $other_def =~ /NOVALIDATE/is);
-				$self->{tables}{$tb_name}{alter_table}[-1] .= $validate;
-				push(@{$self->{tables}{$tb_name}{alter_index}}, $tbspace_move) if ($tbspace_move);
-			}
-
-			# We can just have one primary key constraint
-			if ($tb_def =~ s/CONSTRAINT\s+([^\s]+)\s+PRIMARY KEY//is) {
-				my $constname = $1;
-				$tb_def =~ s/^[^\(]+//;
-				if ( $tb_def =~ s/USING\s+INDEX\s+TABLESPACE\s+([^\s]+).*//s) {
-					$tbspace_move = "ALTER INDEX $constname SET TABLESPACE $1" if ($self->{use_tablespace});
-				} elsif ($tb_def =~ s/USING\s+INDEX\s+([^\s]+).*//s) {
-					push(@{$self->{tables}{$tb_name}{alter_table}}, "ADD PRIMARY KEY " . lc($tb_def));
-					$self->{tables}{$tb_name}{alter_table}[-1] .= " USING INDEX " . lc($1);
-				} elsif ($tb_def) {
-					push(@{$self->{tables}{$tb_name}{alter_table}}, "ADD PRIMARY KEY " . lc($tb_def));
-				}
-				if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
-					$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
-					$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
-					$tid++;
-					$self->{tables}{$tb_name}{internal_id} = $tid;
-				}
-				push(@{$self->{tables}{$tb_name}{alter_index}}, $tbspace_move) if ($tbspace_move);
 			}
+			$pos++;
+		}
+		map {s/^/\t/; s/$/,\n/; } @column_defs;
+		# look for storage information
+		if ($tb_param =~ /TABLESPACE[\s]+([^\s]+)/i) {
+			$self->{tables}{$tb_name}{table_info}{tablespace} = $1;
+			$self->{tables}{$tb_name}{table_info}{tablespace} =~ s/"//g;
+		}
+		if ($tb_param =~ /PCTFREE\s+(\d+)/i) {
+			$self->{tables}{$tb_name}{table_info}{fillfactor} = $1;
+		}
+		if ($tb_param =~ /\bNOLOGGING\b/i) {
+			$self->{tables}{$tb_name}{table_info}{nologging} = 1;
 		}
 
 	}
 
+	my $tbspace_move = '';
+	while ($content =~ s/ALTER\s+TABLE[\s]+([^\s]+)\s+([^;]+);//i) {
+		my $tb_name = $1;
+		$tb_name =~ s/"//g;
+		my $tb_def = $2;
+		$tb_def =~ s/\s+/ /g;
+		$tb_def =~ s/(CHECK[^,;]+)\s+DEFERRABLE\s+INITIALLY\s+DEFERRED/$1/i;
+		if ( $self->{use_tablespace} && ($tb_def =~ /USING\s+INDEX\s+TABLESPACE\s+([^\s]+)/) ) {
+			my $tbspace = $1;
+			$tb_def =~ /\s+CONSTRAINT\s+([^\s]+)\s+/;
+			$tbspace_move = "ALTER INDEX $1 SET TABLESPACE $tbspace";
+		}
+		$tb_def =~ s/\s*USING INDEX.*//g;
+		if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
+			$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
+			$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
+			$tid++;
+			$self->{tables}{$tb_name}{internal_id} = $tid;
+		}
+		push(@{$self->{tables}{$tb_name}{alter_table}}, $tb_def) if ($tb_def);
+		push(@{$self->{tables}{$tb_name}{alter_index}}, $tbspace_move) if ($tbspace_move);
+	}
+
+	while ($content =~ s/CREATE\s+(UNIQUE|BITMAP)?\s*INDEX\s+([^\s]+)\s+ON\s+([^\s\(]+)\s*\(([^;]+);//i) {
+		my $is_unique = $1;
+		my $idx_name = $2;
+		$idx_name =~ s/"//g;
+		my $tb_name = $3;
+		$tb_name =~ s/\s+/ /g;
+		my $idx_def = $4;
+		$idx_def =~ s/\s+/ /g;
+		$idx_def =~ s/\s*nologging//i;
+		$idx_def =~ s/STORAGE\s*\([^\)]+\)\s*//i;
+		$idx_def =~ s/COMPRESS(\s+\d+)?\s*//i;
+		# look for storage information
+		if ($idx_def =~ s/TABLESPACE\s*([^\s]+)\s*//i) {
+			$self->{tables}{$tb_name}{idx_tbsp}{$idx_name} = $1;
+			$self->{tables}{$tb_name}{idx_tbsp}{$idx_name} =~ s/"//g;
+		}
+		if ($idx_def =~ s/ONLINE\s*//i) {
+			$self->{tables}{$tb_name}{concurrently}{$idx_name} = 1;
+		}
+		if ($idx_def =~ s/INDEXTYPE\s+IS\s+.*SPATIAL_INDEX//i) {
+			$self->{tables}{$tb_name}{spatial}{$idx_name} = 1;
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'SPATIAL INDEX';
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_name} = 'SPATIAL_INDEX';
+		}
+		if ($idx_def =~ s/layer_gtype=([^\s,]+)//i) {
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_constraint} = uc($1);
+		}
+		if ($idx_def =~ s/sdo_indx_dims=(\d)//i) {
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_dims} = $1;
+		}
+		$idx_def =~ s/\)[^\)]*$//;
+		if ($is_unique eq 'BITMAP') {
+			$is_unique = '';
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type_name} = 'BITMAP';
+		}
+		$self->{tables}{$tb_name}{uniqueness}{$idx_name} = $is_unique || '';
+                $idx_def =~ s/SYS_EXTRACT_UTC\s*\(([^\)]+)\)/$1/isg;
+		push(@{$self->{tables}{$tb_name}{indexes}{$idx_name}}, $idx_def);
+		$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'NORMAL';
+		if ($idx_def =~ /\(/) {
+			$self->{tables}{$tb_name}{idx_type}{$idx_name}{type} = 'FUNCTION-BASED';
+		}
+
+		if (!exists $self->{tables}{$tb_name}{table_info}{type}) {
+			$self->{tables}{$tb_name}{table_info}{type} = 'TABLE';
+			$self->{tables}{$tb_name}{table_info}{num_rows} = 0;
+			$tid++;
+			$self->{tables}{$tb_name}{internal_id} = $tid;
+		}
+
+	}
 	# Extract comments
 	$self->read_comment_from_file();
 }
@@ -2501,7 +2433,7 @@
 	my $doloop = 1;
 	my @triggers_decl = split(/CREATE(?:\s+OR\s+REPLACE)?\s+TRIGGER\s+/, $content);
 	foreach $content (@triggers_decl) {
-		if ($content =~ s/^([^\s]+)\s+(BEFORE|AFTER|INSTEAD\s+OF)\s+(.*?)\s+ON\s+([^\s]+)\s+(.*)(\bEND\s*(?!IF|LOOP|CASE|INTO|FROM|,)[a-z0-9_]*;)//is) {
+		if ($content =~ s/^([^\s]+)\s+(BEFORE|AFTER|INSTEAD\s+OF)\s+(.*?)\s+ON\s+([^\s]+)\s+(.*)(END\s*(?!IF|LOOP|CASE|INTO|FROM|,)[a-z0-9_]*;)//is) {
 			my $t_name = $1;
 			$t_name =~ s/"//g;
 			my $t_pos = $2;
@@ -2533,7 +2465,7 @@
 			$tid++;
 
 			# TRIGGER_NAME, TRIGGER_TYPE, TRIGGERING_EVENT, TABLE_NAME, TRIGGER_BODY, WHEN_CLAUSE, DESCRIPTION,ACTION_TYPE
-			$trigger =~ s/\bEND\s+[^\s]+\s+$/END/is;
+			$trigger =~ s/END\s+[^\s]+\s+$/END/is;
 			$trigger =~ s/\%TEXTVALUE-(\d+)\%/'$text_values[$1]'/gs;
 			push(@{$self->{triggers}}, [($t_name, $t_pos, $t_event, $tb_name, $trigger, $t_when_cond, '', $t_type)]);
 
@@ -2552,7 +2484,7 @@
 	my $tid = 0; 
 
 	# Sequences 
-	while ($content =~ s/CREATE\s+SEQUENCE[\s]+([^\s;]+)\s*([^;]+);//i) {
+	while ($content =~ s/CREATE\s+SEQUENCE[\s]+([^\s]+)\s*([^;]+);//i) {
 		my $s_name = $1;
 		$s_name =~ s/"//g;
 		my $s_def = $2;
@@ -2560,7 +2492,6 @@
 		$tid++;
 		my @seq_info = ();
 
-		# Field of @seq_info
 		# SEQUENCE_NAME, MIN_VALUE, MAX_VALUE, INCREMENT_BY, LAST_NUMBER, CACHE_SIZE, CYCLE_FLAG, SEQUENCE_OWNER FROM $self->{prefix}_SEQUENCES";
 		push(@seq_info, $s_name);
 		if ($s_def =~ /MINVALUE\s+([\-\d]+)/i) {
@@ -2609,28 +2540,21 @@
 	# Load file in a single string
 	my $content = $self->_get_dml_from_file();
 
-	my @tbsps = split(/\s*;\s*/, $content);
-	# tablespace without undo ones
-	foreach $content (@tbsps) {
-		$content .= ';';
-		if ($content =~ /CREATE\s+(?:BIGFILE|SMALLFILE)?\s*(?:TEMPORARY)?\s*TABLESPACE\s+([^\s;]+)\s*([^;]*);/is) {
-			my $t_name = $1;
-			my $t_def = $2;
-			$t_name =~ s/"//g;
-			if ($t_def =~ /(?:DATA|TEMP)FILE\s+'([^']+)'/is) {
-				my $t_path = $1;
-				$t_path =~ s/:/\//g;
-				$t_path =~ s/\\/\//g;
-				if (dirname($t_path) eq '.') {
-					$t_path = 'change_tablespace_dir';
-				} else {
-					$t_path = dirname($t_path);
-				}
-				# TYPE - TABLESPACE_NAME - FILEPATH - OBJECT_NAME
-				@{$self->{tablespaces}{TABLE}{$t_name}{$t_path}} = ();
-			}
+	my $tid = 0; 
 
+	# tablespace
+	while ($content =~ s/CREATE\s+TABLESPACE\s+([^\s]+)\s+([^;]+);//is) {
+		my $t_name = $1;
+		my $t_def = $2;
+		$t_name =~ s/"//g;
+		if ($t_def =~ s/.*DATAFILE\s+'([^']+)'.*/$1/s) {
+			$tid++;
+			# get path
+			my $t_path = dirname($t_def);
+			# TYPE - TABLESPACE_NAME - FILEPATH - OBJECT_NAME
+			@{$self->{tablespaces}{TABLE}{$t_name}{$t_path}} = ();
 		}
+
 	}
 }
 
@@ -3063,7 +2987,7 @@
 	my ($self, $outfile) = @_;
 
 	my $sql_header = "-- Generated by Ora2Pg, the Oracle database Schema converter, version $VERSION\n";
-	$sql_header .= "-- Copyright 2000-2017 Gilles DAROLD. All rights reserved.\n";
+	$sql_header .= "-- Copyright 2000-2016 Gilles DAROLD. All rights reserved.\n";
 	$sql_header .= "-- DATASOURCE: $self->{oracle_dsn}\n\n";
 	if ($self->{client_encoding}) {
 		$sql_header .= "SET client_encoding TO '\U$self->{client_encoding}\E';\n\n";
@@ -3101,7 +3025,6 @@
 				$self->dump("\\i $file_name\n");
 				$self->logit("Dumping to one file per view : ${view}_$self->{output}\n", 1);
 				$fhdl = $self->open_export_file("${view}_$self->{output}");
-				set_binmode($fhdl);
 			}
 			if (!$self->{pg_supports_checkoption}) {
 				$self->{views}{$view}{text} =~ s/\s*WITH\s+CHECK\s+OPTION//is;
@@ -3126,14 +3049,7 @@
 					$tmpv =~ s/\./"."/;
 					$sql_output .= "CREATE OR REPLACE VIEW \"$tmpv\" AS ";
 				}
-				$sql_output .= $self->{views}{$view}{text} . ";\n";
-				if ($self->{estimate_cost}) {
-					my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $self->{views}{$view}{text}, 'VIEW');
-					$cost += $Ora2Pg::PLSQL::OBJECT_SCORE{'VIEW'};
-					$cost_value += $cost;
-					$sql_output .= "\n-- Estimed cost of view [ $view ]: " . sprintf("%2.2f", $cost);
-				}
-				$sql_output .= "\n";
+				$sql_output .= $self->{views}{$view}{text} . ";\n\n";
 			} else {
 				if (!$self->{preserve_case}) {
 					$sql_output .= "CREATE OR REPLACE VIEW \L$tmpv\E (";
@@ -3168,14 +3084,7 @@
 						$self->{views}{$view}{text} =~ s/SELECT[^\s]*(.*?)\bFROM\b/SELECT $clause FROM/is;
 					}
 				}
-				$sql_output .= ") AS " . $self->{views}{$view}{text} . ";\n";
-				if ($self->{estimate_cost}) {
-					my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $self->{views}{$view}{text}, 'VIEW');
-					$cost += $Ora2Pg::PLSQL::OBJECT_SCORE{'VIEW'};
-					$cost_value += $cost;
-					$sql_output .= "\n-- Estimed cost of view [ $view ]: " . sprintf("%2.2f", $cost);
-				}
-				$sql_output .= "\n";
+				$sql_output .= ") AS " . $self->{views}{$view}{text} . ";\n\n";
 			}
 
 			if ($self->{force_owner}) {
@@ -3368,7 +3277,6 @@
 				$self->dump("\\i $file_name\n");
 				$self->logit("Dumping to one file per materialized view : ${view}_$self->{output}\n", 1);
 				$fhdl = $self->open_export_file("${view}_$self->{output}");
-				set_binmode($fhdl);
 			}
 			if (!$self->{plsql_pgsql}) {
 				$sql_output .= "CREATE MATERIALIZED VIEW $view\n";
@@ -3582,7 +3490,7 @@
 			if (!$self->{quiet} && !$self->{debug}) {
 				print STDERR $self->progress_bar($i, $num_total_sequence, 25, '=', 'sequences', "generating $seq->[0]" ), "\r";
 			}
-			my $cache = '';
+			my $cache = 1;
 			$cache = $seq->[5] if ($seq->[5]);
 			my $cycle = '';
 			$cycle = ' CYCLE' if ($seq->[6] eq 'Y');
@@ -3595,7 +3503,7 @@
 				$seq->[0] =~ s/\./"."/;
 				$sql_output .= "CREATE SEQUENCE \"$seq->[0]\" INCREMENT $seq->[3]";
 			}
-			if ($seq->[1] eq '' || $seq->[1] < (-2**63-1)) {
+			if ($seq->[1] < (-2**63-1)) {
 				$sql_output .= " NO MINVALUE";
 			} else {
 				$sql_output .= " MINVALUE $seq->[1]";
@@ -3604,14 +3512,12 @@
 			if (($seq->[2] > 0) && ($seq->[2] < $seq->[4])) {
 				$seq->[2] = $seq->[4];
 			}
-			if ($seq->[2] eq '' || $seq->[2] > (2**63-1)) {
+			if ($seq->[2] > (2**63-1)) {
 				$sql_output .= " NO MAXVALUE";
 			} else {
 				$sql_output .= " MAXVALUE $seq->[2]";
 			}
-			$sql_output .= " START $seq->[4]";
-			$sql_output .= " CACHE $cache" if ($cache ne '');
-			$sql_output .= "$cycle;\n";
+			$sql_output .= " START $seq->[4] CACHE $cache$cycle;\n";
 
 			if ($self->{force_owner}) {
 				my $owner = $seq->[7];
@@ -3761,7 +3667,6 @@
 				$self->dump("\\i $dirprefix$trig->[0]_$self->{output}\n");
 				$self->logit("Dumping to one file per trigger : $trig->[0]_$self->{output}\n", 1);
 				$fhdl = $self->open_export_file("$trig->[0]_$self->{output}");
-				set_binmode($fhdl);
 			}
 			$trig->[1] =~ s/\s*EACH ROW//is;
 			chomp($trig->[4]);
@@ -3800,7 +3705,7 @@
 				# Replace direct call of a stored procedure in triggers
 				if ($trig->[7] eq 'CALL') {
 					if ($self->{plsql_pgsql}) {
-						$trig->[4] = Ora2Pg::PLSQL::convert_plsql_code($self, $trig->[4]);
+						$trig->[4] = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $trig->[4]);
 					}
 					$trig->[4] = "BEGIN\nPERFORM $trig->[4];\nEND;";
 				} else {
@@ -3817,7 +3722,7 @@
 							$trig->[4] .= ';' if ($trig->[4] !~ /;$/);
 							$trig->[4] = "BEGIN\n$trig->[4]\n$ret_kind\nEND;";
 						}
-						$trig->[4] = Ora2Pg::PLSQL::convert_plsql_code($self, $trig->[4]);
+						$trig->[4] = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $trig->[4]);
 						# When an exception statement is used enclosed everything
 						# in a block before returning NEW
 						if ($trig->[4] =~ /EXCEPTION(.*?)\b(END[;]*)[\s\/]*$/is) {
@@ -3899,7 +3804,7 @@
 					$sql_output .= "CREATE TRIGGER $trig->[6]\n";
 					if ($trig->[5]) {
 						if ($self->{plsql_pgsql}) {
-							$trig->[5] = Ora2Pg::PLSQL::convert_plsql_code($self, $trig->[5]);
+							$trig->[5] = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $trig->[5]);
 						}
 						$sql_output .= "\tWHEN ($trig->[5])\n";
 					}
@@ -4111,7 +4016,7 @@
 			$self->logit("Dumping query $q...\n", 1);
 			my $fhdl = undef;
 			if ($self->{plsql_pgsql}) {
-				my $sql_q = Ora2Pg::PLSQL::convert_plsql_code($self, $self->{queries}{$q});
+				my $sql_q = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{queries}{$q});
 				$sql_output .= $sql_q;
 				if ($self->{estimate_cost}) {
 					my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $sql_q, 'QUERY');
@@ -4231,7 +4136,6 @@
 				$self->dump("\\i $dirprefix${fct}_$self->{output}\n");
 				$self->logit("Dumping to one file per function : ${fct}_$self->{output}\n", 1);
 				$fhdl = $self->open_export_file("${fct}_$self->{output}");
-				set_binmode($fhdl);
 			}
 			if ($self->{plsql_pgsql}) {
 
@@ -4383,7 +4287,6 @@
 				$self->dump("\\i $dirprefix${fct}_$self->{output}\n");
 				$self->logit("Dumping to one file per procedure : ${fct}_$self->{output}\n", 1);
 				$fhdl = $self->open_export_file("${fct}_$self->{output}");
-				set_binmode($fhdl);
 			}
 			if ($self->{plsql_pgsql}) {
 				my $sql_p = '';
@@ -4514,10 +4417,6 @@
 				print STDERR $self->progress_bar($i, $num_total_package, 25, '=', 'packages', "generating $pkg" ), "\r";
 			}
 			$i++, next if (!$self->{packages}{$pkg}{text});
-
-			# Cleanup previous global variables defined in other package
-			%{$self->{global_variables}} = ();
-
 			my $pkgbody = '';
 			my $fct_cost = '';
 			if (!$self->{plsql_pgsql}) {
@@ -4525,7 +4424,6 @@
 				if ($self->{file_per_function}) {
 					$pkgbody = "\\i $dirprefix\L${pkg}\E_$self->{output}\n";
 					my $fhdl = $self->open_export_file("$dirprefix\L${pkg}\E_$self->{output}", 1);
-					set_binmode($fhdl);
 					$self->dump($sql_header . $self->{packages}{$pkg}{text}, $fhdl);
 					$self->close_export_file($fhdl);
 				} else {
@@ -4566,12 +4464,8 @@
 					$fct_cost .= "-- Total estimated cost for package $pkg: $cost_value units, " . $self->_get_human_cost($cost_value) . "\n";
 				}
 				foreach my $txt (@codes) {
-					$self->{idxcomment} = 0;
-					my %comments = $self->_remove_comments(\$txt);
-					$txt = $self->_convert_package("CREATE OR REPLACE PACKAGE BODY$txt", $self->{packages}{$pkg}{owner}, \%comments);
-					$self->_restore_comments(\$txt, \%comments);
-					$pkgbody .= $txt;
-					$pkgbody =~ s/[\r\n]*\bEND;\s*$//is;
+					$pkgbody .= $self->_convert_package("CREATE OR REPLACE PACKAGE BODY$txt", $self->{packages}{$pkg}{owner});
+					$pkgbody =~ s/[\r\n]*END;\s*$//is;
 					$pkgbody =~ s/(\s*;)\s*$/$1/is;
 				}
 			}
@@ -4605,23 +4499,6 @@
 		}
 		$self->dump($sql_output);
 		$self->{packages} = ();
-		$sql_output = '';
-		# Create file to load custom variable initialization into postgresql.conf
-		if (scalar keys %{$self->{global_variables}}) {
-			my $default_vars = '';
-			foreach my $n (keys %{$self->{global_variables}}) {
-				my $str = $n;
-				if (exists $self->{global_variables}{$n}{constant} || exists $self->{global_variables}{$n}{default}) {
-					$default_vars .= "$n = '$self->{global_variables}{$n}{default}\n";
-				}
-			}
-			if ($default_vars) {
-				open(OUT, ">$self->{output_dir}/global_variables.conf");
-				print OUT "-- Global variables with default values used in packages.\n";
-				print OUT $default_vars;
-				close(OUT);
-			}
-		}
 		return;
 	}
 
@@ -4635,20 +4512,25 @@
 		if ($self->{input_file}) {
 			$self->{types} = ();
 			$self->logit("Reading input code from file $self->{input_file}...\n", 1);
-			my $old_sep = $|;
-			$/ = undef;
 			open(IN, "$self->{input_file}");
-			my $content = <IN>;
+			my @alltype = <IN>;
 			close(IN);
-			$/ = $old_sep;
-			my @alltype = split(/;/, $content);
+			my $typnm = '';
+			my $code = '';
 			foreach my $l (@alltype) {
 				chomp($l);
-				next if ($l =~ /^[\s\/]*$/s);
-				$l =~ s/^.*CREATE\s+(?:OR REPLACE)?\s*(?:NONEDITABLE|EDITABLE)?\s*//is;
-				$l .= ";\n";
-				if ($l =~ /^TYPE\s+([^\s\(]+)/is) {
-					push(@{$self->{types}}, { ('name' => $1, 'code' => $l) });
+				next if ($l =~ /^\s*$/);
+				$l =~ s/^\s*CREATE\s*(?:OR REPLACE)?\s*(?:NONEDITABLE|EDITABLE)?\s*//i;
+				$l =~ s/^\s*CREATE\s*//i;
+				$code .= $l . "\n";
+				if ($code =~ /^TYPE\s+([^\s\(]+)/is) {
+					$typnm = $1;
+				}
+				next if (!$typnm);
+				if ($code =~ /;/s) {
+					push(@{$self->{types}}, { ('name' => $typnm, 'code' => $code) });
+					$typnm = '';
+					$code = '';
 				}
 			}
 		}
@@ -4696,9 +4578,8 @@
 				foreach my $tb_path (sort keys %{$self->{tablespaces}{$tb_type}{$tb_name}}) {
 					# Replace Oracle tablespace filename
 					my $loc = $tb_name;
-					if ($tb_path =~ /^(.*[^\\\/]+)/) {
-						$loc = $1 . '/' . $loc;
-					}
+					$tb_path =~ /^(.*)[^\\\/]+$/;
+					$loc = $1 . $loc;
 					if (!grep(/^$tb_name$/, @done)) {
 						$create_tb .= "CREATE TABLESPACE \L$tb_name\E LOCATION '$loc';\n";
 						my $owner = $self->{list_tablespaces}{$tb_name}{owner} || '';
@@ -4734,7 +4615,6 @@
 			my $fhdl = undef;
 			$self->logit("Dumping tablespace alter indexes to one separate file : TBSP_INDEXES_$self->{output}\n", 1);
 			$fhdl = $self->open_export_file("TBSP_INDEXES_$self->{output}");
-			set_binmode($fhdl);
 			$sql_output = '';
 			foreach my $tb_type (sort keys %{$self->{tablespaces}}) {
 				# TYPE - TABLESPACE_NAME - FILEPATH - OBJECT_NAME
@@ -5180,9 +5060,6 @@
 		# Recreate constraint an indexes if required
 		$self->dump("\n$footer") if (!$self->{pg_dsn} && $footer);
 
-		# Close main data output file
-		$self->{fhout}->close();
-
 		# Disconnect from the database
 		$self->{dbh}->disconnect() if ($self->{dbh});
 		$self->{dbhdest}->disconnect() if ($self->{dbhdest});
@@ -5204,7 +5081,6 @@
 			}
 		}
 		my $i = 1;
-		my $partition_indexes = ();
 		foreach my $table (sort keys %{$self->{partitions}}) {
 			my $function = qq{
 CREATE OR REPLACE FUNCTION ${table}_insert_trigger()
@@ -5241,12 +5117,12 @@
 						} else {
 							if ($#{$self->{partitions}{$table}{$pos}{$part}} == 0) {
 								if ($old_part eq '') {
-									$check_cond .= "\t$self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value});
+									$check_cond .= "\t$self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value});
 								} else {
-									$check_cond .= "\t$self->{partitions}{$table}{$pos}{$part}[$i]->{column} >= " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$old_pos}{$old_part}[$i]->{value}) . " AND $self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value});
+									$check_cond .= "\t$self->{partitions}{$table}{$pos}{$part}[$i]->{column} >= " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$old_pos}{$old_part}[$i]->{value}) . " AND $self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value});
 								}
 							} else {
-								my @values = split(/,\s/, Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
+								my @values = split(/,\s/, Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
 								# multicolumn partitioning
 								$check_cond .= "\t$self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " .  $values[$i];
 							}
@@ -5258,30 +5134,30 @@
 							$fct = $1;
 						}
 						my $cindx = $self->{partitions}{$table}{$pos}{$part}[$i]->{column} || '';
-						$cindx = Ora2Pg::PLSQL::convert_plsql_code($self, $cindx);
+						$cindx = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $cindx);
 						$create_table{$table}{'index'} .= "CREATE INDEX ${tb_name}_$colname ON $tb_name ($cindx);\n";
 						if ($self->{partitions_default}{$table} && ($create_table{$table}{'index'} !~ /ON $self->{partitions_default}{$table} /)) {
 							$cindx = $self->{partitions}{$table}{$pos}{$part}[$i]->{column} || '';
-							$cindx = Ora2Pg::PLSQL::convert_plsql_code($self, $cindx);
+							$cindx = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $cindx);
 							$create_table{$table}{'index'} .= "CREATE INDEX $self->{partitions_default}{$table}_$colname ON $self->{partitions_default}{$table} ($cindx);\n";
 						}
 						push(@ind_col, $self->{partitions}{$table}{$pos}{$part}[$i]->{column}) if (!grep(/^$self->{partitions}{$table}{$pos}{$part}[$i]->{column}$/, @ind_col));
 						if ($self->{partitions}{$table}{$pos}{$part}[$i]->{type} eq 'LIST') {
 							if (!$fct) {
-								push(@condition, "NEW.$self->{partitions}{$table}{$pos}{$part}[$i]->{column} IN (" . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}) . ")");
+								push(@condition, "NEW.$self->{partitions}{$table}{$pos}{$part}[$i]->{column} IN (" . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}) . ")");
 							} else {
-								push(@condition, "$fct(NEW.$colname) IN (" . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}) . ")");
+								push(@condition, "$fct(NEW.$colname) IN (" . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}) . ")");
 							}
 						} else {
 							if (!$fct) {
-								push(@condition, "NEW.$self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
+								push(@condition, "NEW.$self->{partitions}{$table}{$pos}{$part}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
 							} else {
-								push(@condition, "$fct(NEW.$colname) < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
+								push(@condition, "$fct(NEW.$colname) < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{partitions}{$table}{$pos}{$part}[$i]->{value}));
 							}
 						}
 						$owner = $self->{partitions}{$table}{$pos}{$part}[$i]->{owner} || '';
 					}
-					$check_cond = Ora2Pg::PLSQL::convert_plsql_code($self, $check_cond);
+					$check_cond = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $check_cond);
 					$create_table{$table}{table} .= $check_cond;
 					$create_table{$table}{table} .= "\n) ) INHERITS ($table);\n";
 					$owner = $self->{force_owner} if ($self->{force_owner} ne "1");
@@ -5311,12 +5187,12 @@
 									} else {
 										if ($#{$self->{subpartitions}{$table}{$p}{$subpart}} == 0) {
 											if ($sub_old_part eq '') {
-												$sub_check_cond .= "$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value});
+												$sub_check_cond .= "$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value});
 											} else {
-												$sub_check_cond .= "$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} >= " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$old_pos}{$sub_old_part}[$i]->{value}) . " AND $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value});
+												$sub_check_cond .= "$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} >= " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$old_pos}{$sub_old_part}[$i]->{value}) . " AND $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value});
 											}
 										} else {
-											my @values = split(/,\s/, Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
+											my @values = split(/,\s/, Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
 											# multicolumn partitioning
 											$sub_check_cond .= "\t$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " .  $values[$i];
 										}
@@ -5329,27 +5205,27 @@
 										$fct = $1;
 									}
 									$cindx = join(',', @ind_col);
-									$cindx = Ora2Pg::PLSQL::convert_plsql_code($self, $cindx);
+									$cindx = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $cindx);
 									$create_table{$table}{'index'} .= "CREATE INDEX ${tb_name}_${sub_tb_name}_$colname ON ${tb_name}_$sub_tb_name ($cindx);\n";
 									if ($self->{subpartitions_default}{$table} && ($create_table{$table}{'index'} !~ /ON $self->{subpartitions_default}{$table} /)) {
 										$create_table{$table}{'index'} .= "CREATE INDEX ${tb_name}_$self->{subpartitions_default}{$table}_$colname ON ${tb_name}_$self->{subpartitions_default}{$table} ($cindx);\n";
 									}
 									if ($self->{subpartitions}{$table}{$p}{$subpart}[$i]->{type} eq 'LIST') {
 										if (!$fct) {
-											push(@subcondition, "NEW.$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} IN (" . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}) . ")");
+											push(@subcondition, "NEW.$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} IN (" . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}) . ")");
 										} else {
-											push(@subcondition, "$fct(NEW.$colname) IN (" . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}) . ")");
+											push(@subcondition, "$fct(NEW.$colname) IN (" . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}) . ")");
 										}
 									} else {
 										if (!$fct) {
-											push(@subcondition, "NEW.$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
+											push(@subcondition, "NEW.$self->{subpartitions}{$table}{$p}{$subpart}[$i]->{column} < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
 										} else {
-											push(@subcondition, "$fct(NEW.$colname) < " . Ora2Pg::PLSQL::convert_plsql_code($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
+											push(@subcondition, "$fct(NEW.$colname) < " . Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{value}));
 										}
 									}
 									$owner = $self->{subpartitions}{$table}{$p}{$subpart}[$i]->{owner} || '';
 								}
-								$sub_check_cond = Ora2Pg::PLSQL::convert_plsql_code($self, $sub_check_cond);
+								$sub_check_cond = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $sub_check_cond);
 								$create_table{$table}{table} .= "$check_cond AND $sub_check_cond";
 								$create_table{$table}{table} .= "\n) ) INHERITS ($table);\n";
 								$owner = $self->{force_owner} if ($self->{force_owner} ne "1");
@@ -5369,7 +5245,7 @@
 					if (!$sub_funct_cond) {
 						$funct_cond .= "\t$cond ( " . join(' AND ', @condition) . " ) THEN INSERT INTO $tb_name VALUES (NEW.*);\n";
 					} else {
-						$sub_funct_cond = Ora2Pg::PLSQL::convert_plsql_code($self, $sub_funct_cond);
+						$sub_funct_cond = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $sub_funct_cond);
 						$funct_cond .= "\t$cond ( " . join(' AND ', @condition) . " ) THEN \n";
 						$funct_cond .= $sub_funct_cond;
 						$funct_cond .= "\t\tELSE INSERT INTO $tb_name VALUES (NEW.*);\n";
@@ -5393,7 +5269,7 @@
                 RAISE EXCEPTION 'Value out of range. Fix the ${table}_insert_trigger() function!';
 };
 			}
-			$function = Ora2Pg::PLSQL::convert_plsql_code($self, $function);
+			$function = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $function);
 			$function .= qq{
         END IF;
         RETURN NULL;
@@ -5402,11 +5278,6 @@
 LANGUAGE plpgsql;
 };
 
-			$partition_indexes .= qq{
--- Create indexes on each partition table
-$create_table{$table}{'index'}
-
-};
 			$sql_output .= qq{
 $create_table{$table}{table}
 };
@@ -5415,6 +5286,8 @@
 CREATE TABLE $self->{partitions_default}{$table} () INHERITS ($table);
 } if ($self->{partitions_default}{$table});
 			$sql_output .= qq{
+-- Create indexes on each partition table
+$create_table{$table}{'index'}
 
 $function
 
@@ -5443,18 +5316,8 @@
 		if (!$sql_output) {
 			$sql_output = "-- Nothing found of type $self->{type}\n";
 		}
+	
 		$self->dump($sql_header . $sql_output);
-		$self->{fhout}->close() if (defined $self->{fhout});
-
-		my $fhdl = undef;
-		$self->logit("Dumping partition indexes to file : PARTITION_INDEXES_$self->{output}\n", 1);
-		$sql_header = "-- Generated by Ora2Pg, the Oracle database Schema converter, version $VERSION\n";
-		$sql_header .= "-- Copyright 2000-2017 Gilles DAROLD. All rights reserved.\n";
-		$sql_header .= "-- DATASOURCE: $self->{oracle_dsn}\n\n";
-		$fhdl = $self->open_export_file("PARTITION_INDEXES_$self->{output}");
-		set_binmode($fhdl);
-		$self->dump($sql_header . $partition_indexes, $fhdl);
-		$self->close_export_file($fhdl);
 
 		return;
 	}
@@ -5506,12 +5369,12 @@
 
 	# DATABASE DESIGN - type 'TABLE'
 	# Dump the database structure: tables, constraints, indexes, etc.
-	if ($self->{export_schema} && ($self->{schema} || $self->{pg_schema})) {
+	if ($self->{export_schema} && $self->{schema}) {
 		if ($self->{create_schema}) {
 			if (!$self->{preserve_case}) {
-				$sql_output .= "CREATE SCHEMA " . lc($self->{pg_schema} || $self->{schema}) . ";\n";
+				$sql_output .= "CREATE SCHEMA \L$self->{schema}\E;\n";
 			} else {
-				$sql_output .= "CREATE SCHEMA \"" . ($self->{pg_schema} || $self->{schema}) . "\";\n";
+				$sql_output .= "CREATE SCHEMA \"$self->{schema}\";\n";
 			}
 		}
 		my $owner = '';
@@ -5519,9 +5382,9 @@
 		$owner ||= $self->{schema};
 		if ($owner && $self->{create_schema}) {
 			if (!$self->{preserve_case}) {
-				$sql_output .= "ALTER SCHEMA " . lc($self->{pg_schema} || $self->{schema}) . " OWNER TO \L$owner\E;\n";
+				$sql_output .= "ALTER SCHEMA \L$self->{schema}\E OWNER TO \L$owner\E;\n";
 			} else {
-				$sql_output .= "ALTER SCHEMA \"" . ($self->{pg_schema} || $self->{schema}) . "\" OWNER TO \"$owner\";\n";
+				$sql_output .= "ALTER SCHEMA \"$self->{schema}\" OWNER TO \"$owner\";\n";
 			}
 		}
 		$sql_output .= "\n";
@@ -5591,7 +5454,7 @@
 		}
 		if (exists $self->{tables}{$table}{table_as}) {
 			if ($self->{plsql_pgsql}) {
-				$self->{tables}{$table}{table_as} = Ora2Pg::PLSQL::convert_plsql_code($self, $self->{tables}{$table}{table_as});
+				$self->{tables}{$table}{table_as} = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $self->{tables}{$table}{table_as});
 			}
 			my $withoid = '';
 			$withoid = 'WITH (OIDS)' if ($self->{with_oid});
@@ -5702,6 +5565,7 @@
 					}
 					$type .= ")";
 				}
+
 				$type = $self->{'modify_type'}{"\L$table\E"}{"\L$f->[0]\E"} if (exists $self->{'modify_type'}{"\L$table\E"}{"\L$f->[0]\E"});
 				if (!$self->{preserve_case}) {
 					$fname = $self->quote_reserved_words($fname);
@@ -5724,7 +5588,7 @@
 					$f->[4] =~ s/^\s+//;
 					$f->[4] =~ s/\s+$//;
 					if ($self->{plsql_pgsql}) {
-						$f->[4] = Ora2Pg::PLSQL::convert_plsql_code($self, $f->[4]);
+						$f->[4] = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $f->[4]);
 					}
 					if (($f->[4] ne '') && ($self->{type} ne 'FDW')) {
 						if (($type eq 'boolean') && exists $self->{ora_boolean_values}{lc($f->[4])}) {
@@ -5732,7 +5596,7 @@
 						} else {
 							if (($f->[4] !~ /^'/) && ($f->[4] =~ /[^\d\.]/)) {
 								if ($type =~ /CHAR|TEXT|ENUM/i) {
-									$f->[4] = "'$f->[4]'" if ($f->[4] !~ /'/);
+									$f->[4] = "'$f->[4]'";
 								} elsif ($type =~ /DATE|TIME/i) {
 									# do not use REPLACE_ZERO_DATE in default value, cause it can be NULL
 									$f->[4] =~ s/^0000-00-00.*/1970-01-01 00:00:00/;
@@ -5814,23 +5678,18 @@
 				$sql_output .= "ALTER $self->{tables}{$table}{table_info}{type} $tbname OWNER TO \"$owner\";\n";
 			}
 		}
+		if (exists $self->{tables}{$table}{alter_table}) {
+			$obj_type =~ s/UNLOGGED //;
+			foreach (@{$self->{tables}{$table}{alter_table}}) {
+				$sql_output .= "\nALTER $obj_type $tbname $_;\n";
+			}
+		}
 		if (exists $self->{tables}{$table}{alter_index}) {
 			foreach (@{$self->{tables}{$table}{alter_index}}) {
 				$sql_output .= "$_;\n";
 			}
 		}
 		if ($self->{type} ne 'FDW') {
-			# Set the indexes definition
-			my ($idx, $fts_idx) = $self->_create_indexes($table, 0, %{$self->{tables}{$table}{indexes}});
-			$indices .= "$idx\n" if ($idx);
-			$fts_indices .= "$fts_idx\n" if ($fts_idx);
-			if (!$self->{file_per_index}) {
-				$sql_output .= $indices;
-				$indices = '';
-				$sql_output .= $fts_indices;
-				$fts_indices = '';
-			}
-
 			# Set the unique (and primary) key definition 
 			$constraints .= $self->_create_unique_keys($table, $self->{tables}{$table}{unique_key});
 			# Set the check constraint definition 
@@ -5840,12 +5699,15 @@
 				$constraints = '';
 			}
 
-
-		}
-		if (exists $self->{tables}{$table}{alter_table}) {
-			$obj_type =~ s/UNLOGGED //;
-			foreach (@{$self->{tables}{$table}{alter_table}}) {
-				$sql_output .= "\nALTER $obj_type $tbname $_;\n";
+			# Set the indexes definition
+			my ($idx, $fts_idx) = $self->_create_indexes($table, 0, %{$self->{tables}{$table}{indexes}});
+			$indices .= "$idx\n" if ($idx);
+			$fts_indices .= "$fts_idx\n" if ($fts_idx);
+			if (!$self->{file_per_index}) {
+				$sql_output .= $indices;
+				$indices = '';
+				$sql_output .= $fts_indices;
+				$fts_indices = '';
 			}
 		}
 		$ib++;
@@ -5858,7 +5720,6 @@
 		my $fhdl = undef;
 		$self->logit("Dumping indexes to one separate file : INDEXES_$self->{output}\n", 1);
 		$fhdl = $self->open_export_file("INDEXES_$self->{output}");
-		set_binmode($fhdl);
 		$indices = "-- Nothing found of type indexes\n" if (!$indices);
 		$indices =~ s/\n+/\n/gs;
 		$self->dump($sql_header . $indices, $fhdl);
@@ -5866,36 +5727,12 @@
 		$indices = '';
 		if ($fts_indices) {
 			$fts_indices =~ s/\n+/\n/gs;
-			my $unaccent = '';
-			if ($self->{use_lower_unaccent}) {
-				$unaccent = qq{
-CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-RETURNS text AS
-\$\$
-  SELECT lower(public.unaccent('public.unaccent', \$1));
-\$\$ LANGUAGE sql IMMUTABLE;
-
-};
-			} elsif ($self->{use_unaccent}) {
-				$unaccent = qq{
-CREATE OR REPLACE FUNCTION unaccent_immutable(text)
-RETURNS text AS
-\$\$
-  SELECT public.unaccent('public.unaccent', \$1);
-\$\$ LANGUAGE sql IMMUTABLE;
-
-};
-			}
-
-			# FTS TRIGGERS are exported in a separate file to be able to parallelize index creation
-			if ($fts_indices) {
-				$self->logit("Dumping triggers for FTS indexes to one separate file : FTS_INDEXES_$self->{output}\n", 1);
-				$fhdl = $self->open_export_file("FTS_INDEXES_$self->{output}");
-				set_binmode($fhdl);
-				$self->dump($sql_header . $unaccent . $fts_indices, $fhdl);
-				$self->close_export_file($fhdl);
-				$fts_indices = '';
-			}
+			# FTS TRIGGERS are exported in a separate file to be able to parallelize index creation
+			$self->logit("Dumping triggers for FTS indexes to one separate file : FTS_INDEXES_$self->{output}\n", 1);
+			$fhdl = $self->open_export_file("FTS_INDEXES_$self->{output}");
+			$self->dump($sql_header . $fts_indices, $fhdl);
+			$self->close_export_file($fhdl);
+			$fts_indices = '';
 		}
 	}
 
@@ -5920,7 +5757,6 @@
 		my $fhdl = undef;
 		$self->logit("Dumping constraints to one separate file : CONSTRAINTS_$self->{output}\n", 1);
 		$fhdl = $self->open_export_file("CONSTRAINTS_$self->{output}");
-		set_binmode($fhdl);
 		$constraints = "-- Nothing found of type constraints\n" if (!$constraints);
 		$self->dump($sql_header . $constraints, $fhdl);
 		$self->close_export_file($fhdl);
@@ -6109,6 +5945,7 @@
 	my ($self, $table, $indexonly, %indexes) = @_;
 
 	my $tbsaved = $table;
+
 	# The %indexes hash can be passed from table or materialized views definition
 	my $objtyp = 'tables';
 	if (!exists $self->{tables}{$tbsaved} && exists $self->{materialized_views}{$tbsaved}) {
@@ -6137,7 +5974,8 @@
 
 		if (exists $self->{replaced_cols}{"\L$tbsaved\E"} && $self->{replaced_cols}{"\L$tbsaved\E"}) {
 			foreach my $c (keys %{$self->{replaced_cols}{"\L$tbsaved\E"}}) {
-				map { s/\b$c\b/$self->{replaced_cols}{"\L$tbsaved\E"}{$c}/i } @{$indexes{$idx}};
+				map { s/^"$c"$/"$self->{replaced_cols}{"\L$tbsaved\E"}{$c}"/i } @{$indexes{$idx}};
+				map { s/^$c$/$self->{replaced_cols}{"\L$tbsaved\E"}{$c}/i } @{$indexes{$idx}};
 			}
 		}
 
@@ -6149,7 +5987,7 @@
 				$i++;
 			}
 			if ($self->{plsql_pgsql}) {
-				$indexes{$idx}->[$j] = Ora2Pg::PLSQL::convert_plsql_code($self, $indexes{$idx}->[$j]);
+				$indexes{$idx}->[$j] = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $indexes{$idx}->[$j]);
 			}
 		}
 
@@ -6269,27 +6107,10 @@
 				($self->{context_as_trgm} && ($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{type_name} =~ /FULLTEXT|CONTEXT/)) ) {
 				# use pg_trgm
 				my @cols = split(/\s*,\s*/, $columns);
-				map { s/^(.*)$/unaccent_immutable($1)/; } @cols;
 				$columns = join(" gin_trgm_ops, ", @cols);
 				$columns .= " gin_trgm_ops";
 				$str .= "CREATE INDEX$concurrently \L$idxname$self->{indexes_suffix}\E ON $table USING gin($columns)";
-			} elsif (($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{type_name} =~ /FULLTEXT|CONTEXT/) && $self->{fts_index_only}) {
-				my $stemmer = $self->{fts_config} || lc($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{stemmer}) || 'pg_catalog.english';
-				my $dico = $stemmer;
-				$dico =~ s/^pg_catalog\.//;
-				if ($self->{use_unaccent}) {
-					$dico =~ s/^(..).*/$1/;
-					if ($fts_str !~ /CREATE TEXT SEARCH CONFIGURATION $dico (COPY = $stemmer);/s) {
-						$fts_str .= "CREATE TEXT SEARCH CONFIGURATION $dico (COPY = $stemmer);\n";
-						$stemmer =~ s/pg_catalog\.//;
-						$fts_str .= "ALTER TEXT SEARCH CONFIGURATION $dico ALTER MAPPING FOR hword, hword_part, word WITH unaccent, ${stemmer}_stem;\n\n";
-					}
-				}
-				# use function-based index
-				my @cols = split(/\s*,\s*/, $columns);
-				$columns = "to_tsvector('$dico', " . join("||' '||", @cols) . ")";
-				$fts_str .= "CREATE INDEX$concurrently \L$idxname$self->{indexes_suffix}\E ON $table USING gin($columns);\n";
-			} elsif (($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{type_name} =~ /FULLTEXT|CONTEXT/) && !$self->{fts_index_only}) {
+			} elsif ($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{type_name} =~ /FULLTEXT|CONTEXT/) {
 				# use Full text search, then create dedicated column and trigger before the index.
 				map { s/"//g; } @{$indexes{$idx}};
 				my $newcolname = $self->quote_object_name(join('_', @{$indexes{$idx}}));
@@ -6301,29 +6122,13 @@
 				my $contruct_vector =  '';
 				my $update_vector =  '';
 				my $weight = 'A';
-				my $stemmer = $self->{fts_config} || lc($self->{$objtyp}{$tbsaved}{idx_type}{$idx}{stemmer}) || 'pg_catalog.english';
-				my $dico = $stemmer;
-				$dico =~ s/^pg_catalog\.//;
-				if ($self->{use_unaccent}) {
-					$dico =~ s/^(..).*/$1/;
-					if ($fts_str !~ /CREATE TEXT SEARCH CONFIGURATION $dico (COPY = $stemmer);/s) {
-						$fts_str .= "CREATE TEXT SEARCH CONFIGURATION $dico (COPY = $stemmer);\n";
-						$stemmer =~ s/pg_catalog\.//;
-						$fts_str .= "ALTER TEXT SEARCH CONFIGURATION $dico ALTER MAPPING FOR hword, hword_part, word WITH unaccent, ${stemmer}_stem;\n\n";
-					}
-				}
-				if ($#{$indexes{$idx}} > 0) {
-					foreach my $col (@{$indexes{$idx}}) {
-						$contruct_vector .= "\t\tsetweight(to_tsvector('$dico', coalesce(new.$col,'')), '$weight') ||\n";
-						$update_vector .= " setweight(to_tsvector('$dico', coalesce($col,'')), '$weight') ||";
-						$weight++;
-					}
-					$contruct_vector =~ s/\|\|$/;/s;
-					$update_vector =~ s/\|\|$/;/s;
-				} else {
-					$contruct_vector = "\t\tto_tsvector('$dico', coalesce(new.$indexes{$idx}->[0],''))\n";
-					$update_vector = " to_tsvector('$dico', coalesce($indexes{$idx}->[0],''))";
+				foreach my $col (@{$indexes{$idx}}) {
+					$contruct_vector .= "\t\tsetweight(to_tsvector('pg_catalog.english', coalesce(new.$col,'')), '$weight') ||\n";
+					$update_vector .= " setweight(to_tsvector('pg_catalog.english', coalesce($col,'')), '$weight') ||";
+					$weight++;
 				}
+				$contruct_vector =~ s/\|\|$/;/s;
+				$update_vector =~ s/\|\|$/;/s;
 
 				$fts_str .= qq{
 -- When the data migration is done without trigger, create tsvector data for all the existing records
@@ -6361,7 +6166,7 @@
 			}
 			$str .= ";";
 			push(@out, $str);
-			push(@fts_out, $fts_str) if ($fts_str);
+			push(@fts_out, $fts_str);
 		}
 	}
 
@@ -6387,14 +6192,15 @@
 		# Cluster, bitmap join, reversed and IOT indexes will not be exported at all
 		next if ($self->{tables}{$tbsaved}{idx_type}{$idx}{type} =~ /JOIN|IOT|CLUSTER|REV/i);
 
+		map { if ($_ !~ /\(.*\)/) { s/^/"/; s/$/"/; } } @{$indexes{$idx}};
 		if (exists $self->{replaced_cols}{"\L$tbsaved\E"} && $self->{replaced_cols}{"\L$tbsaved\E"}) {
 			foreach my $c (keys %{$self->{replaced_cols}{"\L$tbsaved\E"}}) {
-				map { s/\b$c\b/$self->{replaced_cols}{"\L$tbsaved\E"}{$c}/i } @{$indexes{$idx}};
+				map { s/"$c"/"$self->{replaced_cols}{"\L$tbsaved\E"}{$c}"/i } @{$indexes{$idx}};
 			}
 		}
 		map { s/"//gs } @{$indexes{$idx}};
 		if (!$self->{preserve_case}) {
-			map { if ($_ !~ /\(.*\)/) { $_ = $self->quote_reserved_words($_) } } @{$indexes{$idx}};
+			map { $_ = $self->quote_reserved_words($_) } @{$indexes{$idx}};
 		} else {
 			map { if ($_ !~ /\(.*\)/) { s/^/"/; s/$/"/; } } @{$indexes{$idx}};
 		}
@@ -6644,9 +6450,7 @@
 	my $out = '';
 	# Set the check constraint definition 
 	foreach my $k (keys %{$check_constraint->{constraint}}) {
-		my $chkconstraint = $check_constraint->{constraint}->{$k}{condition};
-		my $validate = '';
-		$validate = ' NOT VALID' if ($check_constraint->{constraint}->{$k}{validate} eq 'NOT VALIDATED');
+		my $chkconstraint = $check_constraint->{constraint}->{$k};
 		next if (!$chkconstraint);
 		my $skip_create = 0;
 		if (exists $check_constraint->{notnull}) {
@@ -6662,7 +6466,7 @@
 				}
 			}
 			if ($self->{plsql_pgsql}) {
-				$chkconstraint = Ora2Pg::PLSQL::convert_plsql_code($self, $chkconstraint);
+				$chkconstraint = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $chkconstraint);
 			}
 			if (!$self->{preserve_case}) {
 				foreach my $c (@$field_name) {
@@ -6672,7 +6476,7 @@
 				}
 				$k = lc($k);
 			}
-			$out .= "ALTER TABLE $table ADD CONSTRAINT $k CHECK ($chkconstraint)$validate;\n";
+			$out .= "ALTER TABLE $table ADD CONSTRAINT $k CHECK ($chkconstraint);\n";
 		}
 	}
 
@@ -6702,7 +6506,6 @@
 		my $state;
 		foreach my $h (@{$self->{tables}{$tbsaved}{foreign_key}}) {
 			if (lc($h->[0]) eq lc($fkname)) {
-				# @$h : CONSTRAINT_NAME,R_CONSTRAINT_NAME,SEARCH_CONDITION,DELETE_RULE,$deferrable,DEFERRED,R_OWNER,TABLE_NAME,OWNER,UPDATE_RULE,VALIDATED
 				push(@$state, @$h);
 				last;
 			}
@@ -6775,12 +6578,10 @@
 				$str .= (($self->{'defer_fkey'} ) ? ' DEFERRABLE' : " $state->[4]") if ($state->[4]);
 				$state->[5] = 'DEFERRED' if ($state->[5] =~ /^Y/);
 				$state->[5] ||= 'IMMEDIATE';
-				$str .= " INITIALLY " . ( ($self->{'defer_fkey'} ) ? 'DEFERRED' : $state->[5] );
-				if ($state->[9] eq 'NOT VALIDATED') {
-					$str .= " NOT VALID";
-				}
+				$str .= " INITIALLY " . ( ($self->{'defer_fkey'} ) ? 'DEFERRED' : $state->[5] ) . ";\n";
+			} else {
+				$str .= ";\n";
 			}
-			$str .= ";\n";
 			push(@out, $str);
 		}
 	}
@@ -7141,8 +6942,6 @@
 {
         my ($self, $type, $len, $precision, $scale) = @_;
 
-	$type = uc($type); # Force uppercase
-
 	if ($self->{is_mysql}) {
 		return Ora2Pg::MySQL::_sql_type($self, $type, $len, $precision, $scale);
 	}
@@ -7150,17 +6949,17 @@
 	my $data_type = '';
 
 	# Simplify timestamp type
-	$type =~ s/TIMESTAMP\(\d+\)/TIMESTAMP/;
+	$type =~ s/TIMESTAMP\(\d+\)/TIMESTAMP/i;
 
 	# Interval precision for year/month/day is not supported by PostgreSQL
-	if ($type =~ /INTERVAL/) {
-		$type =~ s/(INTERVAL\s+YEAR)\s*\(\d+\)/$1/;
-		$type =~ s/(INTERVAL\s+YEAR\s+TO\s+MONTH)\s*\(\d+\)/$1/;
-		$type =~ s/(INTERVAL\s+DAY)\s*\(\d+\)/$1/;
+	if ($type =~ /INTERVAL/i) {
+		$type =~ s/(INTERVAL\s+YEAR)\s*\(\d+\)/$1/i;
+		$type =~ s/(INTERVAL\s+YEAR\s+TO\s+MONTH)\s*\(\d+\)/$1/i;
+		$type =~ s/(INTERVAL\s+DAY)\s*\(\d+\)/$1/i;
 		# maximum precision allowed for seconds is 6
 		if ($type =~ /INTERVAL\s+DAY\s+TO\s+SECOND\s*\((\d+)\)/) {
 			if ($1 > 6) {
-				$type =~ s/(INTERVAL\s+DAY\s+TO\s+SECOND)\s*\(\d+\)/$1(6)/;
+				$type =~ s/(INTERVAL\s+DAY\s+TO\s+SECOND)\s*\(\d+\)/$1(6)/i;
 			}
 		}
 	}
@@ -7169,18 +6968,11 @@
 	if ( ($type eq 'NUMBER') && $precision ) {
 		$len = $precision;
 	} elsif ( ($type eq 'NUMBER') && ($len == 38) ) {
-		if ($scale eq '0' && $precision eq '') {
-			# Allow custom type rewrite for NUMBER(*,0)
-			return $self->{data_type}{'NUMBER(*,0)'} if (exists $self->{data_type}{'NUMBER(*,0)'});
-		}
 		$precision = $len;
-	} elsif ( $type =~ /CHAR/ && $len && exists $self->{data_type}{"$type($len)"}) {
-		return $self->{data_type}{"$type($len)"};
-	} elsif ( $type =~ /RAW/ && $len && exists $self->{data_type}{"$type($len)"}) {
-		return $self->{data_type}{"$type($len)"};
 	}
 
-        if (exists $self->{data_type}{$type}) {
+        if (exists $self->{data_type}{uc($type)}) {
+		$type = uc($type); # Force uppercase
 		if ($len) {
 
 			if ( ($type eq "CHAR") || ($type eq "NCHAR") || ($type =~ /VARCHAR/) ) {
@@ -7649,7 +7441,7 @@
 	$condition .= $self->limit_to_objects('CKEY', 'CONSTRAINT_NAME');
 
 	my $sth = $self->{dbh}->prepare(<<END) or $self->logit("FATAL: " . $self->{dbh}->errstr . "\n", 0, 1);
-SELECT CONSTRAINT_NAME,R_CONSTRAINT_NAME,SEARCH_CONDITION,DELETE_RULE,DEFERRABLE,DEFERRED,R_OWNER,TABLE_NAME,OWNER,VALIDATED
+SELECT CONSTRAINT_NAME,R_CONSTRAINT_NAME,SEARCH_CONDITION,DELETE_RULE,DEFERRABLE,DEFERRED,R_OWNER,TABLE_NAME,OWNER
 FROM $self->{prefix}_CONSTRAINTS
 WHERE CONSTRAINT_TYPE='C' $condition
 AND STATUS='ENABLED'
@@ -7662,8 +7454,7 @@
 		if ($self->{export_schema} && !$self->{schema}) {
 			$row->[7] = "$row->[8].$row->[7]";
 		}
-		$data{$row->[7]}{constraint}{$row->[0]}{condition} = $row->[2];
-		$data{$row->[7]}{constraint}{$row->[0]}{validate}  = $row->[9];
+		$data{$row->[7]}{constraint}{$row->[0]} = $row->[2];
 	}
 
 	return %data;
@@ -7728,14 +7519,11 @@
     CONS_R.TABLE_NAME R_TABLE_NAME,
     CONS.R_CONSTRAINT_NAME,
     COLS_R.COLUMN_NAME R_COLUMN_NAME,
-    CONS.SEARCH_CONDITION,CONS.DELETE_RULE,$defer,CONS.DEFERRED,
-    CONS.OWNER,CONS.R_OWNER,
-    COLS.POSITION,COLS_R.POSITION,
-    CONS.VALIDATED
+    CONS.SEARCH_CONDITION,CONS.DELETE_RULE,$defer,CONS.DEFERRED,CONS.OWNER,CONS.R_OWNER,COLS.POSITION,COLS_R.POSITION
 FROM $self->{prefix}_CONSTRAINTS CONS
     LEFT JOIN $self->{prefix}_CONS_COLUMNS COLS ON (COLS.CONSTRAINT_NAME = CONS.CONSTRAINT_NAME AND COLS.OWNER = CONS.OWNER AND COLS.TABLE_NAME = CONS.TABLE_NAME)
-    LEFT JOIN $self->{prefix}_CONSTRAINTS CONS_R ON (CONS_R.CONSTRAINT_NAME = CONS.R_CONSTRAINT_NAME AND CONS_R.OWNER = CONS.R_OWNER)
-    LEFT JOIN $self->{prefix}_CONS_COLUMNS COLS_R ON (COLS_R.CONSTRAINT_NAME = CONS.R_CONSTRAINT_NAME AND COLS_R.POSITION=COLS.POSITION AND COLS_R.OWNER = CONS.R_OWNER)
+    LEFT JOIN $self->{prefix}_CONSTRAINTS CONS_R ON (CONS_R.CONSTRAINT_NAME = CONS.R_CONSTRAINT_NAME AND CONS_R.OWNER = CONS.OWNER)
+    LEFT JOIN $self->{prefix}_CONS_COLUMNS COLS_R ON (COLS_R.CONSTRAINT_NAME = CONS.R_CONSTRAINT_NAME AND COLS_R.POSITION=COLS.POSITION AND COLS_R.OWNER = COLS.OWNER)
 WHERE CONS.CONSTRAINT_TYPE = 'R' $condition
 ORDER BY CONS.TABLE_NAME, CONS.CONSTRAINT_NAME, COLS.POSITION
 END
@@ -7753,7 +7541,7 @@
 			$local_table = "$row->[10].$row->[0]";
 			$remote_table = "$row->[11].$row->[3]";
 		}
-		push(@{$data{$local_table}}, [ ($row->[1],$row->[4],$row->[6],$row->[7],$row->[8],$row->[9],$row->[11],$row->[0],$row->[10],$row->[14]) ]);
+		push(@{$data{$local_table}}, [ ($row->[1],$row->[4],$row->[6],$row->[7],$row->[8],$row->[9],$row->[11],$row->[0],$row->[10]) ]);
 		#            TABLENAME     CONSTNAME           COLNAME
 		push(@{$link{$local_table}{$row->[1]}{local}}, $row->[2]);
 		#            TABLENAME     CONSTNAME          TABLENAME        COLNAME
@@ -7963,10 +7751,6 @@
 
 	return Ora2Pg::MySQL::_get_indexes($self,$table,$owner) if ($self->{is_mysql});
 
-	# Retrieve FTS indexes information before.
-	my %idx_info = ();
-	%idx_info = $self->_get_fts_indexes_info($owner) if ($self->_table_exists('CTXSYS', 'CTX_INDEX_VALUES'));
-
 	my $sub_owner = '';
 	if ($owner) {
 		$sub_owner = "AND A.INDEX_OWNER=B.TABLE_OWNER";
@@ -8049,10 +7833,10 @@
 
 		if ($self->{preserve_case}) {
 			if (($row->[1] !~ /".*"/) && ($row->[1] !~ /\(.*\)/)) {
-				$row->[1] = "\"$row->[1]\"";
+				$row->[1] =~ s/^/"/;
+				$row->[1] =~ s/$/"/;
 			}
 		}
-
 		# Index with DESC are declared as FUNCTION-BASED, fix that
 		if (($row->[4] =~ /FUNCTION-BASED/i) && ($row->[1] !~ /\(.*\)/)) {
 			$row->[4] =~ s/FUNCTION-BASED\s*//;
@@ -8064,13 +7848,6 @@
 		} else {
 			$idx_type{$row->[-6]}{$row->[0]}{type} = $row->[4];
 		}
-		my $idx_name = $row->[0];
-		if (!$self->{schema} && $self->{export_schema}) {
-			$idx_name = "$row->[-5].$row->[0]";
-		}
-		if (exists $idx_info{$idx_name}) {
-			$idx_type{$row->[-6]}{$row->[0]}{stemmer} = $idx_info{$idx_name}{stemmer};
-		}
 		if ($row->[-3] =~ /SPATIAL_INDEX/) {
 			$idx_type{$row->[-6]}{$row->[0]}{type} = 'SPATIAL INDEX';
 			if ($row->[-2] =~ /layer_gtype=([^\s,]+)/i) {
@@ -8087,50 +7864,10 @@
 		$index_tablespace{$row->[-6]}{$row->[0]} = $row->[-4];
 
 	}
-	$sth->finish();
-	$sth2->finish();
 
 	return \%unique, \%data, \%idx_type, \%index_tablespace;
 }
 
-=head2 _get_fts_indexes_info
-
-This function retrieves FTS index attribute information
-
-Returns a hash containing all useful attribute values for all FTS indexes
-
-=cut
-
-sub _get_fts_indexes_info
-{
-	my ($self, $owner) = @_;
-
-	my $condition = '';
-	$condition .= "AND IXV_INDEX_OWNER='$owner' " if ($owner);
-	$condition .= $self->limit_to_objects('INDEX', "IXV_INDEX_NAME");
-
-	# Retrieve all indexes informations
-	my $sth = $self->{dbh}->prepare(<<END) or $self->logit("FATAL: " . $self->{dbh}->errstr . "\n", 0, 1);
-SELECT DISTINCT IXV_INDEX_OWNER,IXV_INDEX_NAME,IXV_CLASS,IXV_ATTRIBUTE,IXV_VALUE
-FROM CTXSYS.CTX_INDEX_VALUES
-WHERE (IXV_CLASS='WORDLIST' AND IXV_ATTRIBUTE='STEMMER') $condition
-ORDER BY IXV_INDEX_NAME
-END
-
-	$sth->execute or $self->logit("FATAL: " . $self->{dbh}->errstr . "\n", 0, 1);
-	my %indexes_info = ();
-	while (my $row = $sth->fetch) {
-		my $save_idx = $row->[1];
-		if (!$self->{schema} && $self->{export_schema}) {
-			$row->[1] = "$row->[0].$row->[1]";
-		}
-		$indexes_info{$row->[1]}{"\L$row->[3]\E"} = $row->[4];
-	}
-
-	return %indexes_info;
-}
-
-
 
 =head2 _get_sequences
 
@@ -9596,11 +9333,11 @@
 	for (my $idx = 0; $idx < scalar(@$data_types); $idx++) {
 		my $hs={};
 		$hs->{geometry} = $src_data_types->[$idx] =~ /GEOMETRY/i ? 1 : 0;
-		$hs->{isnum} =    $data_types->[$idx] !~ /^(char|varchar|date|time|text|bytea|xml|uuid)/i ? 1 :0;
+		$hs->{isnum} =    $data_types->[$idx] !~ /^(char|varchar|date|time|text|bytea|xml)/i ? 1 :0;
 		$hs->{isdate} =  $data_types->[$idx] =~ /^(date|time)/i ? 1 : 0;
 		$hs->{raw} = $src_data_types->[$idx] =~ /RAW/i ? 1 : 0;
 		$hs->{clob} = $src_data_types->[$idx] =~ /CLOB/i ? 1 : 0;
-		$hs->{istext} = $data_types->[$idx] =~ /(char|text|xml|uuid)/i ? 1 : 0;
+		$hs->{istext} = $data_types->[$idx] =~ /(char|text|xml)/i ? 1 : 0;
 		$hs->{isbytea} = $data_types->[$idx] =~ /bytea/i ? 1 : 0;
 		$hs->{isbit} = $data_types->[$idx] =~ /bit/i ? 1 : 0;
 		$hs->{isnotnull} = 0;
@@ -9649,6 +9386,7 @@
 	} else {
 		 $self->{fhout}->print($data);
 	}
+
 }
 
 
@@ -9672,12 +9410,11 @@
 		$filename = "tmp_$filename";
 	}
 	# Set file temporary until the table export is done
-	$self->logit("Dumping data from $rname to file: $filename\n", 1);
+	$self->logit("Dumping data from $rname to file: $dirprefix${rname}_$self->{output}\n", 1);
 
 	if ( ($self->{jobs} > 1) || ($self->{oracle_copies} > 1) ) {
 		$self->{fhout}->close() if (defined $self->{fhout} && !$self->{file_per_table} && !$self->{pg_dsn});
 		my $fh = $self->append_export_file($filename);
-		set_binmode($fh);
 		flock($fh, 2) || die "FATAL: can't lock file $dirprefix$filename\n";
 		$fh->print($data);
 		$self->close_export_file($fh);
@@ -9687,7 +9424,6 @@
 	} elsif ($self->{file_per_table}) {
 		if ($self->{file_per_table} && $pname) {
 			my $fh = $self->append_export_file($filename);
-			set_binmode($fh);
 			$fh->print($data);
 			$self->close_export_file($fh);
 			$self->logit("Written " . length($data) . " bytes to $dirprefix$filename\n", 1);
@@ -9696,7 +9432,6 @@
 			if ($self->{compress} eq 'Zlib') {
 				$self->{cfhout}->gzwrite($data) or $self->logit("FATAL: error writing compressed data\n", 0, 1);
 			} else {
-				set_binmode($self->{cfhout});
 				$self->{cfhout}->print($data);
 			}
 		}
@@ -9704,11 +9439,6 @@
 		$self->dump($data);
 	}
 
-	# Rename temporary output file
-	if ($self->{file_per_table} && -e "$dirprefix$filename") {
-		$self->logit("Renaming temporary file $dirprefix$filename into ${dirprefix}${rname}_$self->{output}\n", 1);
-		rename("$dirprefix$filename", "${dirprefix}${rname}_$self->{output}");
-	}
 }
 
 =head2 read_config
@@ -9894,7 +9624,7 @@
 	}
 	#push(@functions, "$before\n") if ($before);
 
-	map { s/\bEND\s+(?!IF|LOOP|CASE|INTO|FROM|,)[a-z0-9_]+\s*;/END;/igs; } @functions;
+	map { s/END\s+(?!IF|LOOP|CASE|INTO|FROM|,)[a-z0-9_]+\s*;/END;/igs; } @functions;
 
 	return @functions;
 }
@@ -9909,7 +9639,7 @@
 
 sub _convert_package
 {
-	my ($self, $plsql, $owner, $hrefcomment) = @_;
+	my ($self, $plsql, $owner) = @_;
 
 	my $dirprefix = '';
 	$dirprefix = "$self->{output_dir}/" if ($self->{output_dir});
@@ -9917,9 +9647,7 @@
 	if ($self->{package_as_schema}) {
 		$content = "-- PostgreSQL does not recognize PACKAGES, using SCHEMA instead.\n";
 	}
-	if ($self->{package_as_schema} && ($plsql =~ /PACKAGE\s+BODY\s*([^\s]+)\s*(AS|IS)\s*/is)) {
-		my $pname = $1;
-		$pname =~ s/"//g;
+	if ($self->{package_as_schema} && ($plsql =~ /PACKAGE\s+BODY\s*[^\s]+\s*(AS|IS)\s*/is)) {
 		if (!$self->{preserve_case}) {
 			$content .= "DROP SCHEMA $self->{pg_supports_ifexists} $pname CASCADE;\n";
 			$content .= "CREATE SCHEMA $pname;\n";
@@ -9939,15 +9667,16 @@
 		}
 	}
 
+
 	# Convert type from the package header
-	if ($plsql =~ s/(?:CREATE\s+OR\s+REPLACE\s+)?PACKAGE\s+BODY\s*PACKAGE\s+([^\s]+)\s+(AS|IS)\s+TYPE\s+([^\s]+)\s+(AS|IS)\s+(.*;?)\s*PACKAGE BODY/PACKAGE BODY/is) {
+	if ($plsql =~ s/PACKAGE\s+BODY\s*PACKAGE\s+([^\s]+)\s+(AS|IS)\s+TYPE\s+([^\s]+)\s+(AS|IS)\s+(.*;?)\s*PACKAGE BODY/PACKAGE BODY/is) {
 		my $pname = $1;
 		my $type = $3;
 		my $params = $5;
 		$params =~ s/(PROCEDURE|FUNCTION)\s+(.*?);//gis;
 		$params =~ s/\s+END[^;]*;\s*$//is;
 		$params =~ s/CREATE TYPE/TYPE/gis;
-		while ($params =~ s/(?<!\%)TYPE\s+([^\s\.]+\s+)/CREATE TYPE $pname.$1/is) {
+		while ($params =~ s/TYPE\s+([^\s\.]+\s+)/CREATE TYPE $pname.$1/is) {
 			$self->{pkg_type}{$1} = "$pname.$1";
 		}
 		$params =~ s/\b$type\b/$pname.$type/gis;
@@ -9972,6 +9701,7 @@
 				$i++;
 			}
 		}
+		$i = 1;
 		foreach my $tpe (sort {$a->{pos} <=> $b->{pos}} @{$self->{types}}) {
 			$self->logit("Dumping type $tpe->{name}...\n", 1);
 			if ($self->{plsql_pgsql}) {
@@ -9980,54 +9710,18 @@
 				$tpe->{code} = "CREATE OR REPLACE $tpe->{code}\n";
 			}
 			$content .= $tpe->{code} . "\n";
+			$i++;
 		}
 	}
 
 	# Convert the package body part
 	if ($plsql =~ /PACKAGE\s+BODY\s*([^\s]+)\s*(AS|IS)\s*(.*)/is) {
-
 		my $pname = $1;
 		my $type = $2;
 		my $ctt = $3;
-		my $glob_declare = $`;
-
 		$pname =~ s/"//g;
 		$pname =~ s/^.*\.//g;
 		$self->logit("Dumping package $pname...\n", 1);
-
-		# Process package spec to extract global variables
-		if ($glob_declare) {
-			# Remove header of the package decalaration
-			$glob_declare =~ s/^CREATE(.*?)\s+AS\s+//;
-			# Remove all function declaration
-			$glob_declare =~ s/(PROCEDURE|FUNCTION)[^;]+;//gis;
-			# Remove end of the package decalaration
-			$glob_declare =~ s/\s+END[^;]*;\s*$//is;
-			my @cursors = ();
-			while ($glob_declare =~ s/(CURSOR\s+[^;]+\s+RETURN\s+[^;]+;)//) {
-				push(@cursors, $1);
-			}
-			# Extract TYPE declaration
-			my $i = 0;
-			while ($glob_declare =~ s/TYPE\s+([^\s]+)\s+(AS|IS)\s+([^;]+;)//) {
-				$self->{pkg_type}{$1} = "$pname.$1";
-				my $code = "TYPE $self->{pkg_type}{$1} AS $3";
-				push(@{$self->{types}}, { ('name' => $1, 'code' => $code, 'pos' => $i++) });
-			}
-			# Then dump custom type
-			foreach my $tpe (sort {$a->{pos} <=> $b->{pos}} @{$self->{types}}) {
-				$self->logit("Dumping type $tpe->{name}...\n", 1);
-				if ($self->{plsql_pgsql}) {
-					$tpe->{code} = $self->_convert_type($tpe->{code}, $tpe->{owner});
-				} else {
-					$tpe->{code} = "CREATE OR REPLACE $tpe->{code}\n";
-				}
-				$content .= $tpe->{code} . "\n";
-				$i++;
-			}
-			$content .= join("\n", @cursors) . "\n";
-			$glob_declare = $self->register_global_variable($pname, $glob_declare);
-		}
 		if ($self->{file_per_function}) {
 			my $dir = lc("$dirprefix$pname");
 			if (!-d "$dir") {
@@ -10039,13 +9733,14 @@
 				}
 			}
 		}
-		$ctt =~ s/\bEND[^;]*;$//is;
+		$self->{idxcomment} = 0;
+		$ctt =~ s/END[^;]*;$//is;
+		my %comments = $self->_remove_comments(\$ctt);
 		my @functions = $self->_extract_functions($ctt);
 
-
 		# Try to detect local function
 		for (my $i = 0; $i <= $#functions; $i++) {
-			my %fct_detail = $self->_lookup_function($functions[$i], $pname);
+			my %fct_detail = $self->_lookup_function($functions[$i]);
 			if (!exists $fct_detail{name}) {
 				$functions[$i] = '';
 				next;
@@ -10076,14 +9771,13 @@
 		$self->{pkgcost} = 0;
 		foreach my $f (@functions) {
 			next if (!$f);
-			$content .= $self->_convert_function($owner, $f, $pname, $hrefcomment);
+			$content .= $self->_convert_function($owner, $f, $pname, \%comments);
 		}
+		$self->_restore_comments(\$content, \%comments);
 		if ($self->{estimate_cost}) {
 			$self->{total_pkgcost} += $self->{pkgcost} || 0;
 		}
-
 	}
-
 	return $content;
 }
 
@@ -10170,7 +9864,7 @@
 
 	my %fct_detail = ();
 	if (!$self->{is_mysql}) {
-		%fct_detail = $self->_lookup_function($plsql, $pname);
+		%fct_detail = $self->_lookup_function($plsql);
 	} else {
 		%fct_detail = $self->_lookup_function($pname);
 		$pname = '';
@@ -10211,23 +9905,12 @@
 		$search_path = $self->set_search_path($owner);
 	}
 
-	my @nout = $fct_detail{args} =~ /\bOUT\s+([^,\)]+)/igs;
-	my @ninout = $fct_detail{args} =~ /\bINOUT\s+([^,\)]+)/igs;
 	if ($fct_detail{hasreturn}) {
-		my $nbout = $#nout+1 + $#ninout+1;
-		if ($nbout > 1) {
-			# Return record type 
-			$func_return = " RETURNS RECORD AS \$body\$\n";
-		} elsif ($nbout == 1 && $#nout == 0) {
-			my $typout = $nout[0];
-			$typout = $self->_sql_type($typout) || $typout;
-			# Return type returned by the function
-			$func_return = " RETURNS $typout AS \$body\$\n";
-		} else {
-			# Returns the right type
-			$func_return = " RETURNS$fct_detail{setof} $fct_detail{func_ret_type} AS \$body\$\n";
-		}
+		# Returns the right type
+		$func_return = " RETURNS$fct_detail{setof} $fct_detail{func_ret_type} AS \$body\$\n";
 	} else {
+		my @nout = $fct_detail{args} =~ /\bOUT /igs;
+		my @ninout = $fct_detail{args} =~ /\bINOUT /igs;
 		# Return void when there's no out parameters
 		if (($#nout < 0) && ($#ninout < 0)) {
 			$func_return = " RETURNS VOID AS \$body\$\n";
@@ -10296,8 +9979,6 @@
 --
 -- dblink wrapper to call function $name as an autonomous transaction
 --
-CREATE EXTENSION IF NOT EXISTS dblink;
-
 };
 		if (!$fct_detail{hasreturn}) {
 			$at_wrapper .= "CREATE OR REPLACE FUNCTION $name $fct_detail{args} RETURNS VOID AS \$body\$";
@@ -10340,8 +10021,6 @@
 --
 -- pg_background wrapper to call function $name as an autonomous transaction
 --
-CREATE EXTENSION IF NOT EXISTS pg_background;
-
 };
 		if (!$fct_detail{hasreturn}) {
 			$at_wrapper .= "CREATE OR REPLACE FUNCTION $name $fct_detail{args} RETURNS VOID AS \$body\$";
@@ -10391,7 +10070,6 @@
 	my $revoke = '';
 	if ($fct_detail{code}) {
 		$fct_detail{declare} = '' if ($fct_detail{declare} !~ /[a-z]/is);
-		$fct_detail{declare} =~ s/^\s*DECLARE//;
 		$function .= "DECLARE\n$fct_detail{declare}\n" if ($fct_detail{declare});
 		$function .= $fct_detail{code};
 		$function .= "\n\$body\$\nLANGUAGE PLPGSQL\n";
@@ -10429,7 +10107,7 @@
 		$fname =~ s/"//g; # Remove case sensitivity quoting
 		$self->logit("\tDumping to one file per function: $dirprefix\L$pname/$fname\E_$self->{output}\n", 1);
 		my $sql_header = "-- Generated by Ora2Pg, the Oracle database Schema converter, version $VERSION\n";
-		$sql_header .= "-- Copyright 2000-2017 Gilles DAROLD. All rights reserved.\n";
+		$sql_header .= "-- Copyright 2000-2016 Gilles DAROLD. All rights reserved.\n";
 		$sql_header .= "-- DATASOURCE: $self->{oracle_dsn}\n\n";
 		if ($self->{client_encoding}) {
 			$sql_header .= "SET client_encoding TO '\U$self->{client_encoding}\E';\n";
@@ -10437,7 +10115,6 @@
 		$sql_header .= $self->set_search_path();
 
 		my $fhdl = $self->open_export_file("$dirprefix\L$pname/$fname\E_$self->{output}", 1);
-		set_binmode($fhdl);
 		$self->_restore_comments(\$function, $hrefcomments);
 		$self->dump($sql_header . $function, $fhdl);
 		$self->close_export_file($fhdl);
@@ -10570,7 +10247,7 @@
 		}
 	}
 	if ($self->{plsql_pgsql}) {
-			$sqlstr = Ora2Pg::PLSQL::convert_plsql_code($self, $sqlstr);
+			$sqlstr = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $sqlstr);
 	}
 
 	return $sqlstr;
@@ -10659,15 +10336,13 @@
 	my $unsupported = "-- Unsupported, please edit to match PostgreSQL syntax\n";
 	my $content = '';
 	my $type_name = '';
-
-	$plsql =~ s/\s*INDEX\s+BY\s+([^\s;]+)//is;
 	if ($plsql =~ /TYPE\s+([^\s]+)\s+(IS|AS)\s*TABLE\s*OF\s+(.*)/is) {
 		$type_name = $1;
 		my $type_of = $3;
 		if ($self->{export_schema} && !$self->{schema} && $owner) {
 			$type_name = "$owner.$type_name";
 		}
-		$type_of =~ s/\s*NOT[\t\s]+NULL//is;
+		$type_of =~ s/\s*NOT[\t\s]+NULL//s;
 		$type_of =~ s/\s*;$//s;
 		$type_of =~ s/^\s+//s;
 		if ($type_of !~ /\s/s) { 
@@ -10679,11 +10354,6 @@
 			$self->logit("WARNING: this kind of Nested Tables are not supported, skipping type $1\n", 1);
 			return "${unsupported}CREATE OR REPLACE $plsql";
 		}
-	} elsif ($plsql =~ /TYPE\s+([^\s]+)\s+(AS|IS)\s*REF\s+CURSOR/is) {
-		$self->logit("WARNING: TYPE REF CURSOR are not supported, skipping type $1\n", 1);
-		$plsql =~ s/\bREF\s+CURSOR/REFCURSOR/is;
-		$self->{type_of_type}{'Type Ref Cursor'}++;
-		return "${unsupported}CREATE OR REPLACE $plsql";
 	} elsif ($plsql =~ /TYPE\s+([^\s]+)\s+(AS|IS)\s*OBJECT\s*\((.*?)(TYPE BODY.*)/is) {
 		$self->{type_of_type}{'Type Boby'}++;
 		$self->logit("WARNING: TYPE BODY are not supported, skipping type $1\n", 1);
@@ -10753,7 +10423,6 @@
 			$type_name = "$owner.$type_name";
 		}
 		my $declar = Ora2Pg::PLSQL::replace_sql_type($tbname, $self->{pg_numeric_type}, $self->{default_numeric}, $self->{pg_integer_type});
-		$declar =~ s/[\n\r]+//s;
 		$content = qq{
 CREATE TYPE \L$type_name\E AS ($type_name $declar\[$size\]);
 };
@@ -11251,14 +10920,13 @@
 	}
 	$outfile .= $table . '_error.log';
 
-	my $filehdl = new IO::File;
-	$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't write to $outfile: $!\n", 0, 1);
-	$filehdl->print($s_out);
+	open(OUTERROR, ">>$outfile") or $self->logit("FATAL: can not write to $outfile, $!\n", 0, 1);
+	print OUTERROR "$s_out";
 	foreach my $row (@$rows) {
-		$filehdl->print(join("\t", @$row) . "\n");
+		print OUTERROR join("\t", @$row), "\n";
 	}
-	$filehdl->print("\\.\n");
-	$filehdl->close();
+	print OUTERROR "\\.\n";
+	close(OUTERROR);
 
 }
 
@@ -11272,10 +10940,9 @@
 	}
 	$outfile .= $table . '_error.log';
 
-	my $filehdl = new IO::File;
-	$filehdl->open(">>$outfile") or $self->logit("FATAL: Can't write to $outfile: $!\n", 0, 1);
-	$filehdl->print("$sql_out\n");
-	$filehdl->close();
+	open(OUTERROR, ">>$outfile") or $self->logit("FATAL: can not write to $outfile, $!\n", 0, 1);
+	print OUTERROR "$sql_out\n";
+	close(OUTERROR);
 
 }
 
@@ -11462,6 +11129,12 @@
 	my $tt_record = @$rows;
 	$dbhdest->disconnect() if ($dbhdest);
 
+        # Set file temporary until the table export is done
+        my $filename = $self->{output};
+        if ($self->{file_per_table}) {
+                $filename = "${rname}_$self->{output}";
+        }
+ 
 	my $end_time = time();
 	$ora_start_time = $end_time if (!$ora_start_time);
 	my $dt = $end_time - $ora_start_time;
@@ -11855,7 +11528,7 @@
 			} elsif ($typ eq 'TYPE') {
 				my $total_type = 0;
 				foreach my $t (sort keys %{$self->{type_of_type}}) {
-					$total_type++ if (!grep(/^$t$/, 'Associative Arrays','Type Boby','Type with member method', 'Type Ref Cursor'));
+					$total_type++ if (!grep(/^$t$/, 'Associative Arrays','Type Boby','Type with member method'));
 					$report_info{'Objects'}{$typ}{'detail'} .= "\L$self->{type_of_type}{$t} $t\E\n" if ($self->{type_of_type}{$t});
 				}
 				$report_info{'Objects'}{$typ}{'cost_value'} = ($Ora2Pg::PLSQL::OBJECT_SCORE{$typ}*$total_type) if ($self->{estimate_cost});
@@ -11982,20 +11655,14 @@
 			} elsif ($typ eq 'VIEW') {
 				my %view_infos = $self->_get_views();
 				foreach my $view (sort keys %view_infos) {
-
-					# Remove unsupported definitions from the ddl statement
-					$view_infos{$view}{text} =~ s/\s*WITH\s+READ\s+ONLY//is;
-					$view_infos{$view}{text} =~ s/\s*OF\s+([^\s]+)\s+(WITH|UNDER)\s+[^\)]+\)//is;
-					$view_infos{$view}{text} =~ s/\s*OF\s+XMLTYPE\s+[^\)]+\)//is;
-					$view_infos{$view}{text} = $self->_format_view($view_infos{$view}{text});
-
 					if ($self->{estimate_cost}) {
-						my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $view_infos{$view}{text}, 'VIEW');
+						my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $view_infos{$view}{text});
+						next if ($cost <= ($cost_detail{SIZE}+$cost_detail{TEST}));
+						$cost -= ($cost_detail{SIZE} + $cost_detail{TEST});
+						$cost = sprintf("%.1f", $cost);
+						delete $cost_detail{SIZE};
+						delete $cost_detail{TEST};
 						$report_info{'Objects'}{$typ}{'cost_value'} += $cost;
-						# Do not show view that just have to be tested
-						next if (!$cost);
-						$cost += $Ora2Pg::PLSQL::OBJECT_SCORE{'VIEW'};
-						# Show detail about views that might need manual rewritting
 						$report_info{'Objects'}{$typ}{'detail'} .= "\L$view: $cost\E\n";
 						$report_info{full_view_details}{"\L$view\E"}{count} = $cost;
 						foreach my $d (sort { $cost_detail{$b} <=> $cost_detail{$a} } keys %cost_detail) {
@@ -12003,7 +11670,7 @@
 							$report_info{full_view_details}{"\L$view\E"}{info} .= "\t$d => $cost_detail{$d}";
 							$report_info{full_view_details}{"\L$view\E"}{info} .= " (cost: ${$uncovered_score}{$d})" if (${$uncovered_score}{$d});
 							$report_info{full_view_details}{"\L$view\E"}{info} .= "\n";
-							push(@{$report_info{full_view_details}{"\L$view\E"}{keywords}}, $d); 
+							push(@{$report_info{full_view_details}{"\L$view\E"}{keywords}}, $d) if (($d ne 'SIZE') && ($d ne 'TEST')); 
 						}
 					}
 				}
@@ -12036,7 +11703,7 @@
 			my %queries = $self->_get_audit_queries();
 			foreach my $q (sort {$a <=> $b} keys %queries) {
 				$report_info{'Objects'}{'QUERY'}{'number'}++;
-				my $sql_q = Ora2Pg::PLSQL::convert_plsql_code($self, $queries{$q});
+				my $sql_q = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $queries{$q});
 				if ($self->{estimate_cost}) {
 					my ($cost, %cost_detail) = Ora2Pg::PLSQL::estimate_cost($self, $sql_q, 'QUERY');
 					$cost += $Ora2Pg::PLSQL::OBJECT_SCORE{'QUERY'};
@@ -12559,7 +12226,7 @@
 			next if (!exists $tables_infos{$t});
 			my $nbcheck = 0;
 			foreach my $cn (keys %{$check_constraints{$t}{constraint}}) {
-				$nbcheck++ if ($check_constraints{$t}{constraint}{$cn}{condition} !~ /IS NOT NULL$/);
+				$nbcheck++ if ($check_constraints{$t}{constraint}{$cn} !~ /IS NOT NULL$/);
 			}
 			print "$lbl:$t:$nbcheck\n";
 			if ($self->{pg_dsn}) {
@@ -13244,32 +12911,6 @@
         $sth;
 }
 
-=head2 _table_exists
-
-This function return the table name if the given table exists
-else returns a empty string.
-
-=cut
-
-sub _table_exists
-{
-	my ($self, $schema, $table) = @_;
-
-	return Ora2Pg::MySQL::_table_exists($self, $schema, $table) if ($self->{is_mysql});
-
-	my $ret = '';
-
-	my $sql = "SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = '$schema' AND TABLE_NAME = '$table'";
-        my $sth = $self->{dbh}->prepare( $sql ) or return undef;
-        $sth->execute or return undef;
-	while ( my @row = $sth->fetchrow()) {
-		$ret = $row[0];
-	}
-        $sth->finish();
-	return $ret;
-}
-
-
 
 =head2 _get_largest_tables
 
@@ -13788,7 +13429,7 @@
 		}
 
 		# Always exclude unwanted tables
-		if (!$self->{is_mysql} && !$has_limitation && ($arr_type[$i] =~ /TABLE|SEQUENCE|VIEW|TRIGGER|TYPE/)) {
+		if (!$self->{is_mysql} && !$has_limitation && ($arr_type[$i] =~ /TABLE|SEQUENCE|VIEW|TRIGGER/)) {
 			if ($self->{db_version} =~ /Release [89]/) {
 				$str .= ' AND (';
 				foreach my $t (@EXCLUDED_TABLES_8I) {
@@ -13843,7 +13484,7 @@
 
 	# Set the check constraint definition 
 	foreach my $k (keys %{$check_constraint->{constraint}}) {
-		my $chkconstraint = $check_constraint->{constraint}->{$k}{condition};
+		my $chkconstraint = $check_constraint->{constraint}->{$k};
 		next if (!$chkconstraint);
 		my $skip_create = 0;
 		if (exists $check_constraint->{notnull}) {
@@ -13859,7 +13500,7 @@
 				}
 			}
 			if ($self->{plsql_pgsql}) {
-				$chkconstraint = Ora2Pg::PLSQL::convert_plsql_code($self, $chkconstraint);
+				$chkconstraint = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $chkconstraint);
 			}
 			next if ($nonotnull && ($chkconstraint =~ /IS NOT NULL/));
 			if (!$self->{preserve_case}) {
@@ -13871,9 +13512,7 @@
 				}
 				$k = lc($k);
 			}
-			my $validate = '';
-			$validate = ' NOT VALID' if ($check_constraint->{constraint}->{$k}{validate} eq 'NOT VALIDATED');
-			push(@chk_constr,  "ALTER TABLE $table ADD CONSTRAINT $k CHECK ($chkconstraint)$validate;\n");
+			push(@chk_constr,  "ALTER TABLE $table ADD CONSTRAINT $k CHECK ($chkconstraint);\n");
 		}
 	}
 
@@ -13901,12 +13540,12 @@
 		$pname =~ s/"//g;
 		$self->logit("Looking at package $pname...\n", 1);
 		$self->{idxcomment} = 0;
-		$content =~ s/\bEND[^;]*;$//is;
+		$content =~ s/END[^;]*;$//is;
 		my %comments = $self->_remove_comments(\$content);
 		my @functions = $self->_extract_functions($content);
 		foreach my $f (@functions) {
 			next if (!$f);
-			my %fct_detail = $self->_lookup_function($f, $pname);
+			my %fct_detail = $self->_lookup_function($f);
 			next if (!exists $fct_detail{name});
 			$fct_detail{name} =~ s/^.*\.//;
 			$fct_detail{name} =~ s/"//g;
@@ -13929,13 +13568,13 @@
 
 sub _lookup_function
 {
-	my ($self, $plsql, $pname) = @_;
+	my ($self, $plsql) = @_;
 
 	if ($self->{is_mysql}) {
 		if ($self->{type} eq 'FUNCTION') {
-			return Ora2Pg::MySQL::_lookup_function($self, $plsql, $pname);
+			return Ora2Pg::MySQL::_lookup_function($self, $plsql);
 		} else {
-			return Ora2Pg::MySQL::_lookup_procedure($self, $plsql, $pname);
+			return Ora2Pg::MySQL::_lookup_procedure($self, $plsql);
 		}
 	}
 
@@ -13954,14 +13593,7 @@
 		$fct_detail{type} = uc($2);
 		$fct_detail{name} = $3;
 		$fct_detail{args} = $4;
-		if ($fct_detail{before}) {
-			my @cursors = ();
-			while ($fct_detail{before} =~ s/(CURSOR\s+[^;]+\s+RETURN\s+[^;]+;)//) {
-				push(@cursors, $1);
-			}
-			$fct_detail{before} = $self->register_global_variable($pname, $fct_detail{before});
-			$fct_detail{before} = join("\n", @cursors) . "\n" . $fct_detail{before};
-		}
+
 		if ($fct_detail{args} =~ /\b(RETURN|IS|AS)\b/is) {
 			$fct_detail{args} = '()';
 		}
@@ -13971,7 +13603,6 @@
 
 		$fct_detail{immutable} = 1 if ($fct_detail{declare} =~ s/\bDETERMINISTIC\b//is);
 		$fct_detail{setof} = 1 if ($fct_detail{declare} =~ s/\bPIPELINED\b//is);
-		$fct_detail{declare} =~ s/DEFAULT/:=/igs;
 		if ($fct_detail{declare} =~ s/(.*?)RETURN\s+self\s+AS RESULT IS//is) {
 			$fct_detail{args} .= $1;
 			$fct_detail{hasreturn} = 1;
@@ -14007,18 +13638,9 @@
 		$fct_detail{declare} = Ora2Pg::PLSQL::replace_sql_type($fct_detail{declare}, $self->{pg_numeric_type}, $self->{default_numeric}, $self->{pg_integer_type});
 
 		# Replace PL/SQL code into PL/PGSQL similar code
-		$fct_detail{declare} = Ora2Pg::PLSQL::convert_plsql_code($self, $fct_detail{declare});
+		$fct_detail{declare} = Ora2Pg::PLSQL::plsql_to_plpgsql($self, $fct_detail{declare});
 		if ($fct_detail{code}) {
-			$fct_detail{code} = Ora2Pg::PLSQL::convert_plsql_code($self, "BEGIN".$fct_detail{code});
-		}
-		# Sometime variable used in FOR ... IN loop is not declared
-		# Append its RECORD declaration in the DECLARE section.
-		my $tmp_code = $fct_detail{code};
-		while ($tmp_code =~ s/FOR\s+([^\s]+)\s+IN//is) {
-			my $varname = $1;
-			if ($fct_detail{declare} !~ /\b$varname\s+/) {
-				$fct_detail{declare} .= "  $varname RECORD;\n";
-			}
+			$fct_detail{code} = Ora2Pg::PLSQL::plsql_to_plpgsql($self, "BEGIN".$fct_detail{code});
 		}
 		# Set parameters for AUTONOMOUS TRANSACTION
 		$fct_detail{args} =~ s/\s+/ /gs;
@@ -14033,18 +13655,7 @@
 		delete $fct_detail{declare};
 		$fct_detail{code} = $plsql;
 	}
-	# Replace call to global variables declared in this package
-	foreach my $n (keys %{$self->{global_variables}}) {
-		next if ($pname && (uc($n) !~ /^\U$pname\E\./));
-		my $i = 0;
-		while ($fct_detail{code} =~ s/\b$n\s*:=\s*([^;]+)\s*;/PERFORM set_config('$n', $1, false);/is) { last if ($i++ > 100); };
-		$i = 0;
-		while ($fct_detail{code} =~ s/([^\.]+)\b$self->{global_variables}{$n}{name}\s*:=\s*([^;]+);/$1PERFORM set_config('$n', $2, false);/is) { last if ($i++ > 100); };
-		$i = 0;
-		while ($fct_detail{code} =~ s/([^']+)\b$n\b([^']+)/$1current_setting('$n')::$self->{global_variables}{$n}{type}$2/is) { last if ($i++ > 100); };
-		$i = 0;
-		while ($fct_detail{code} =~ s/([^\.']+)\b$self->{global_variables}{$n}{name}\b([^']+)/$1current_setting('$n')::$self->{global_variables}{$n}{type}$2/is) { last if ($i++ > 100); };
-	}
+
 	return %fct_detail;
 }
 
@@ -14204,6 +13815,7 @@
 # Other object type
 #CLUSTER
 #CONSUMER GROUP
+#CONTEXT
 #DESTINATION
 #DIMENSION
 #EDITION
@@ -15002,48 +14614,6 @@
 	return $col;
 }
 
-sub register_global_variable
-{
-	my ($self, $pname, $glob_vars) = @_;
-
-	$glob_vars = Ora2Pg::PLSQL::replace_sql_type($glob_vars, $self->{pg_numeric_type}, $self->{default_numeric}, $self->{pg_integer_type});
-
-	# Replace PL/SQL code into PL/PGSQL similar code
-	$glob_vars = Ora2Pg::PLSQL::convert_plsql_code($self, $glob_vars);
-
-	my @vars = split(/\s*(\%ORA2PG_COMMENT\d+\%|;)\s*/, $glob_vars);
-	map { s/^\s+//; s/\s+$//; } @vars;
-	my $ret = '';
-	foreach my $l (@vars) {
-		if ($l eq ';' || $l =~ /ORA2PG_COMMENT/) {
-			$ret .= $l if ($l ne ';');
-			next;
-		}
-		$l =~ s/\-\-[^\r\n]+//sg;
-		my ($n, $type, @others) = split(/\s+/, $l);
-		$ret .= $l, next if (!$type);
-		if (!$n) {
-			$n = $type;
-			$type = $others[0] || '';
-		}
-		$ret .= $l, next if (uc($type) eq 'EXCEPTION');
-		next if (!$pname);
-		my $v = $pname . '.' . $n;
-		$self->{global_variables}{$v}{name} = $n;
-		if (uc($type) eq 'CONSTANT') {
-			$self->{global_variables}{$v}{constant} = 1;
-			$type = shift(@others);
-		}
-		if ($#others > 0 && $others[0] eq ':=') {
-			shift(@others);
-			$self->{global_variables}{$v}{default} = join(' ', @others);
-		}
-		$self->{global_variables}{$v}{type} = $type;
-	}
-
-	return $ret;
-}
-
 1;
 
 __END__
@@ -15056,7 +14626,7 @@
 
 =head1 COPYRIGHT
 
-Copyright (c) 2000-2017 Gilles Darold - All rights reserved.
+Copyright (c) 2000-2016 Gilles Darold - All rights reserved.
 
 	This program is free software: you can redistribute it and/or modify
 	it under the terms of the GNU General Public License as published by
diff -Nru ora2pg-18.0/packaging/README ora2pg-17.6/packaging/README
--- ora2pg-18.0/packaging/README	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/README	2016-11-18 05:45:49.000000000 +0800
@@ -12,13 +12,13 @@
 
 	The binary package may be found here:
 
-		~/rpmbuild/RPMS/noarch/ora2pg-18.0-1.noarch.rpm
+		~/rpmbuild/RPMS/noarch/ora2pg-17.6-1.noarch.rpm
 	or
-		/usr/src/redhat/RPMS/i386/ora2pg-18.0-1.noarch.rpm
+		/usr/src/redhat/RPMS/i386/ora2pg-17.6-1.noarch.rpm
 
 	To install run:
 
-		rpm -i ~/rpmbuild/RPMS/noarch/ora2pg-18.0-1.noarch.rpm
+		rpm -i ~/rpmbuild/RPMS/noarch/ora2pg-17.6-1.noarch.rpm
 
 
 slackbuild/
@@ -30,7 +30,7 @@
 	then take a look at /tmp/build/ to find the Slackware package.
 	To install run the following command:
 
-		installpkg /tmp/build/ora2pg-18.0-i386-1gda.tgz
+		installpkg /tmp/build/ora2pg-17.6-i486-1gda.tgz
 
 
 debian/
diff -Nru ora2pg-18.0/packaging/RPM/ora2pg.spec ora2pg-17.6/packaging/RPM/ora2pg.spec
--- ora2pg-18.0/packaging/RPM/ora2pg.spec	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/RPM/ora2pg.spec	2016-11-18 05:45:49.000000000 +0800
@@ -1,6 +1,6 @@
 Summary:	Oracle to PostgreSQL database schema converter
 Name:		ora2pg
-Version:	18.0
+Version:	17.6
 Release:	1%{?dist}
 Group:		Applications/Databases
 License:	GPLv3+
@@ -11,7 +11,7 @@
 
 BuildRequires:	perl
 Requires:	perl(DBD::Oracle)
-Requires:	perl-DBD-MySQL perl(DBI) perl(IO::Compress::Base)
+Requires:	perl-DBD-MySQL perl(DBI) perl(String::Random) perl(IO::Compress::Base)
 
 %description
 This package contains a Perl module and a companion script to convert an
@@ -55,8 +55,8 @@
 %{_docdir}/%{name}-%{version}/*
 
 %changelog
-* Sat Apr  9 2016 Gilles Darold <gilles@darold.net> 18.0
-- Update to 18.0
+* Sat Apr  9 2016 Gilles Darold <gilles@darold.net> 17.6
+- Update to 17.6
 - Append %{name} to CONFDIR and %{version} to DOCDIR in %files section
 
 * Fri Mar 25 2016 Devrim Gündüz <devrim@gunduz.org> 17.2-1
diff -Nru ora2pg-18.0/packaging/debian/ora2pg/DEBIAN/control ora2pg-17.6/packaging/debian/ora2pg/DEBIAN/control
--- ora2pg-18.0/packaging/debian/ora2pg/DEBIAN/control	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/debian/ora2pg/DEBIAN/control	2016-11-18 05:45:49.000000000 +0800
@@ -1,5 +1,5 @@
 Package: ora2pg
-Version: 18.0
+Version: 17.6
 Priority: optional
 Architecture: all
 Essential: no
diff -Nru ora2pg-18.0/packaging/debian/ora2pg/DEBIAN/copyright ora2pg-17.6/packaging/debian/ora2pg/DEBIAN/copyright
--- ora2pg-18.0/packaging/debian/ora2pg/DEBIAN/copyright	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/debian/ora2pg/DEBIAN/copyright	2016-11-18 05:45:49.000000000 +0800
@@ -13,7 +13,7 @@
 
 Copyright:
 
-    Copyright (c) 2000-2017 : Gilles Darold - All rights reserved
+    Copyright (c) 2000-2016 : Gilles Darold - All rights reserved
 
 License:
 
diff -Nru ora2pg-18.0/packaging/slackbuild/Ora2Pg.SlackBuild ora2pg-17.6/packaging/slackbuild/Ora2Pg.SlackBuild
--- ora2pg-18.0/packaging/slackbuild/Ora2Pg.SlackBuild	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/slackbuild/Ora2Pg.SlackBuild	2016-11-18 05:45:49.000000000 +0800
@@ -12,7 +12,7 @@
 
 ## Fill these variables to your needs ##
 NAMESRC=${NAMESRC:-ora2pg}
-VERSION=${VERSION:-18.0}
+VERSION=${VERSION:-17.6}
 EXT=${EXT:-tar.bz2}
 NAMEPKG=${NAMEPKG:-ora2pg}
 PKGEXT=${PKGEXT:-tgz/txz}
diff -Nru ora2pg-18.0/packaging/slackbuild/Ora2Pg.info ora2pg-17.6/packaging/slackbuild/Ora2Pg.info
--- ora2pg-18.0/packaging/slackbuild/Ora2Pg.info	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/packaging/slackbuild/Ora2Pg.info	2016-11-18 05:45:49.000000000 +0800
@@ -1,7 +1,7 @@
 PRGNAM="Ora2Pg"
-VERSION="18.0"
+VERSION="17.6"
 HOMEPAGE="http://ora2pg.darold.net/"
-DOWNLOAD="http://downloads.sourceforge.net/ora2pg/ora2pg-18.0.tar.gz"
+DOWNLOAD="http://downloads.sourceforge.net/ora2pg/ora2pg-17.6.tar.gz"
 MD5SUM=""
 DOWNLOAD_x86_64="UNTESTED"
 MD5SUM_x86_64=""
diff -Nru ora2pg-18.0/scripts/ora2pg ora2pg-17.6/scripts/ora2pg
--- ora2pg-18.0/scripts/ora2pg	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/scripts/ora2pg	2016-11-18 05:45:49.000000000 +0800
@@ -3,7 +3,7 @@
 # Project  : Oracle to Postgresql converter
 # Name     : ora2pg
 # Author   : Gilles Darold, gilles _AT_ darold _DOT_ net
-# Copyright: Copyright (c) 2000-2017 : Gilles Darold - All rights reserved -
+# Copyright: Copyright (c) 2000-2016 : Gilles Darold - All rights reserved -
 # Function : Script used to convert Oracle Database to PostgreSQL
 # Usage    : ora2pg configuration_file
 #------------------------------------------------------------------------------
@@ -31,7 +31,7 @@
 setlocale(LC_NUMERIC, '');
 setlocale(LC_ALL,     'C');
 
-my $VERSION = '18.0';
+my $VERSION = '17.6';
 
 $| = 1;
 
@@ -77,7 +77,6 @@
 my $PG_USER = '';
 my $PG_PWD = '';
 my $COUNT_ROWS = 0;
-my $DATA_TYPE = '';
 
 my @SCHEMA_ARRAY  = qw( TABLE PACKAGE VIEW GRANT SEQUENCE TRIGGER FUNCTION PROCEDURE TABLESPACE PARTITION TYPE MVIEW DBLINK SYNONYM DIRECTORY );
 my @EXTERNAL_ARRAY  = qw( KETTLE FDW );
@@ -98,7 +97,6 @@
         'b|basedir=s' => \$OUTDIR,
         'c|conf=s' => \$FILE_CONF,
         'd|debug!' => \$DEBUG,
-        'D|data_type=s' => \$DATA_TYPE,
 	'e|exclude=s' => \$EXCLUDE,
         'h|help!' => \$HELP,
 	'i|input_file=s' => \$INPUT_FILE,
@@ -273,7 +271,6 @@
 	pg_user => $PG_USER,
 	pg_pwd => $PG_PWD,
 	count_rows => $COUNT_ROWS,
-	data_type => $DATA_TYPE,
 );
 
 
@@ -312,7 +309,6 @@
     -c | --conf file  : Used to set an alternate configuration file than the
 			default /etc/ora2pg/ora2pg.conf.
     -d | --debug      : Enable verbose output.
-    -D | --data_type STR : Allow custom type replacement at command line.
     -e | --exclude str: coma separated list of objects to exclude from export.
 			Can be used with SHOW_COLUMN too.
     -h | --help       : Print this short help.
@@ -566,7 +562,6 @@
 		$conf_arr->[$i] =~ s/^#LONGREADLEN.*1047552/LONGREADLEN\t1047552/;
 		$conf_arr->[$i] =~ s/^AUTODETECT_SPATIAL_TYPE.*0/AUTODETECT_SPATIAL_TYPE\t1/;
 		$conf_arr->[$i] =~ s/^NO_LOB_LOCATOR.*/NO_LOB_LOCATOR\t0/;
-		$conf_arr->[$i] =~ s/^FTS_INDEX_ONLY.*0/FTS_INDEX_ONLY\t1/;
 		if ($DSN) {
 			$conf_arr->[$i] =~ s/^ORACLE_DSN.*/ORACLE_DSN\t$DSN/;
 		}
diff -Nru ora2pg-18.0/scripts/ora2pg_scanner ora2pg-17.6/scripts/ora2pg_scanner
--- ora2pg-18.0/scripts/ora2pg_scanner	2017-01-31 01:31:49.000000000 +0800
+++ ora2pg-17.6/scripts/ora2pg_scanner	2016-11-18 05:45:49.000000000 +0800
@@ -3,7 +3,7 @@
 # Project  : Oracle to Postgresql converter
 # Name     : ora2pg_scanner
 # Author   : Gilles Darold, gilles _AT_ darold _DOT_ net
-# Copyright: Copyright (c) 2000-2017 : Gilles Darold - All rights reserved -
+# Copyright: Copyright (c) 2000-2016 : Gilles Darold - All rights reserved -
 # Function : Script used to scan a list of DSN and generate reports
 # Usage    : ora2pg_scanner -l dsn_csv_file -o outdir
 #------------------------------------------------------------------------------
@@ -26,7 +26,7 @@
 
 use Getopt::Long qw(:config no_ignore_case bundling);
 
-my $VERSION = '18.0';
+my $VERSION = '17.6';
 
 my @DB_DNS = ();
 my $OUTDIR = '';
